/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/epoch.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/kobj.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/bus.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

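/*
 * Typical consumer flow (informal sketch only; request setup depends on the
 * struct xdma_request fields declared in dev/xdma/xdma.h, and "tx",
 * my_done_cb and sc below are placeholders for the consumer's own dma-names
 * entry, completion callback and softc):
 *
 *	xdma_controller_t *xdma = xdma_ofw_get(dev, "tx");
 *	xdma_channel_t *xchan = xdma_channel_alloc(xdma, 0);
 *	void *ih;
 *
 *	xdma_setup_intr(xchan, 0, my_done_cb, sc, &ih);
 *	... fill in a struct xdma_request and pass it to xdma_request() ...
 *	xdma_control(xchan, XDMA_CMD_BEGIN);
 *
 * Teardown is the reverse: xdma_teardown_intr(), xdma_channel_free() and
 * xdma_put().
 */
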
/*
 * Multiple xDMA controllers may work with a single DMA device,
 * so we use a global lock for physical channel management.
 */
static struct mtx xdma_mtx;

#define	XDMA_LOCK()			mtx_lock(&xdma_mtx)
#define	XDMA_UNLOCK()			mtx_unlock(&xdma_mtx)
#define	XDMA_ASSERT_LOCKED()		mtx_assert(&xdma_mtx, MA_OWNED)

#define	FDT_REG_CELLS	4

#ifdef FDT
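/*
 * Look up the optional "xdma,iommu" property of the DMA controller node and
 * resolve the referenced IOMMU device. Returns 1 if an IOMMU was found and
 * recorded in the channel, 0 otherwise.
 */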
static int
xdma_get_iommu_fdt(xdma_controller_t *xdma, xdma_channel_t *xchan)
{
	struct xdma_iommu *xio;
	phandle_t node;
	pcell_t prop;
	size_t len;

	node = ofw_bus_get_node(xdma->dma_dev);
	if (OF_getproplen(node, "xdma,iommu") <= 0)
		return (0);

	len = OF_getencprop(node, "xdma,iommu", &prop, sizeof(prop));
	if (len != sizeof(prop)) {
		device_printf(xdma->dev,
		    "%s: Can't get iommu device node\n", __func__);
		return (0);
	}

	xio = &xchan->xio;
	xio->dev = OF_device_from_xref(prop);
	if (xio->dev == NULL) {
		device_printf(xdma->dev,
		    "%s: Can't get iommu device\n", __func__);
		return (0);
	}

	/* Found */
	return (1);
}
#endif

/*
 * Allocate a virtual xDMA channel.
 */
xdma_channel_t *
xdma_channel_alloc(xdma_controller_t *xdma, uint32_t caps)
{
	xdma_channel_t *xchan;
	int ret;

	xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
	xchan->xdma = xdma;

#ifdef FDT
	/* Check if this DMA controller supports IOMMU. */
	if (xdma_get_iommu_fdt(xdma, xchan))
		caps |= XCHAN_CAP_IOMMU | XCHAN_CAP_NOSEG;
#endif

	xchan->caps = caps;

	XDMA_LOCK();

	/* Request a real channel from hardware driver. */
	ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't request hardware channel.\n", __func__);
		XDMA_UNLOCK();
		free(xchan, M_XDMA);

		return (NULL);
	}

	TAILQ_INIT(&xchan->ie_handlers);

	mtx_init(&xchan->mtx_lock, "xDMA chan", NULL, MTX_DEF);
	mtx_init(&xchan->mtx_qin_lock, "xDMA qin", NULL, MTX_DEF);
	mtx_init(&xchan->mtx_qout_lock, "xDMA qout", NULL, MTX_DEF);
	mtx_init(&xchan->mtx_bank_lock, "xDMA bank", NULL, MTX_DEF);
	mtx_init(&xchan->mtx_proc_lock, "xDMA proc", NULL, MTX_DEF);

	TAILQ_INIT(&xchan->bank);
	TAILQ_INIT(&xchan->queue_in);
	TAILQ_INIT(&xchan->queue_out);
	TAILQ_INIT(&xchan->processing);

	if (xchan->caps & XCHAN_CAP_IOMMU)
		xdma_iommu_init(&xchan->xio);

	TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);

	XDMA_UNLOCK();

	return (xchan);
}

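/*
 * Free a virtual xDMA channel: release the hardware channel, any
 * scatter-gather and IOMMU state, all interrupt handlers and the
 * channel locks.
 */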
int
xdma_channel_free(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int err;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	XDMA_LOCK();

	/* Free the real DMA channel. */
	err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't free real hw channel.\n", __func__);
		XDMA_UNLOCK();
		return (-1);
	}

	if (xchan->flags & XCHAN_TYPE_SG)
		xdma_channel_free_sg(xchan);

	if (xchan->caps & XCHAN_CAP_IOMMU)
		xdma_iommu_release(&xchan->xio);

	xdma_teardown_all_intr(xchan);

	mtx_destroy(&xchan->mtx_lock);
	mtx_destroy(&xchan->mtx_qin_lock);
	mtx_destroy(&xchan->mtx_qout_lock);
	mtx_destroy(&xchan->mtx_bank_lock);
	mtx_destroy(&xchan->mtx_proc_lock);

	TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);

	free(xchan, M_XDMA);

	XDMA_UNLOCK();

	return (0);
}

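/*
 * Install a transfer-completion handler on the channel. The handler is
 * called from xdma_callback(); a cookie for later teardown is optionally
 * returned through ihandler.
 */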
int
xdma_setup_intr(xdma_channel_t *xchan, int flags,
    int (*cb)(void *, xdma_transfer_status_t *),
    void *arg, void **ihandler)
{
	struct xdma_intr_handler *ih;
	xdma_controller_t *xdma;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	/* Sanity check. */
	if (cb == NULL) {
		device_printf(xdma->dev,
		    "%s: Can't setup interrupt handler.\n",
		    __func__);

		return (-1);
	}

	ih = malloc(sizeof(struct xdma_intr_handler),
	    M_XDMA, M_WAITOK | M_ZERO);
	ih->flags = flags;
	ih->cb = cb;
	ih->cb_user = arg;

	XCHAN_LOCK(xchan);
	TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
	XCHAN_UNLOCK(xchan);

	if (ihandler != NULL)
		*ihandler = ih;

	return (0);
}

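/*
 * Remove a single handler previously installed by xdma_setup_intr().
 */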
int
xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
{
	xdma_controller_t *xdma;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	/* Sanity check. */
	if (ih == NULL) {
		device_printf(xdma->dev,
		    "%s: Can't teardown interrupt.\n", __func__);
		return (-1);
	}

	TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
	free(ih, M_XDMA);

	return (0);
}

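/*
 * Remove and free every handler installed on the channel.
 */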
int
xdma_teardown_all_intr(xdma_channel_t *xchan)
{
	struct xdma_intr_handler *ih_tmp;
	struct xdma_intr_handler *ih;

	KASSERT(xchan->xdma != NULL, ("xdma is NULL"));

	TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
		TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
		free(ih, M_XDMA);
	}

	return (0);
}

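/*
 * Submit a transfer request directly to the hardware driver.
 */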
int
xdma_request(xdma_channel_t *xchan, struct xdma_request *req)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	KASSERT(xdma != NULL, ("xdma is NULL"));

	XCHAN_LOCK(xchan);
	ret = XDMA_CHANNEL_REQUEST(xdma->dma_dev, xchan, req);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't request a transfer.\n", __func__);
		XCHAN_UNLOCK(xchan);

		return (-1);
	}
	XCHAN_UNLOCK(xchan);

	return (0);
}

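/*
 * Pass a control command (enum xdma_command, e.g. begin or terminate a
 * transfer) to the hardware driver.
 */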
int
xdma_control(xdma_channel_t *xchan, enum xdma_command cmd)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, cmd);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't process command.\n", __func__);
		return (-1);
	}

	return (0);
}

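/*
 * Called by the DMA hardware driver when a transfer completes: run every
 * registered handler (inside the net epoch for XDMA_INTR_NET handlers) and,
 * for scatter-gather channels, submit any queued requests.
 */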
void
xdma_callback(xdma_channel_t *xchan, xdma_transfer_status_t *status)
{
	struct xdma_intr_handler *ih_tmp;
	struct xdma_intr_handler *ih;
	struct epoch_tracker et;

	KASSERT(xchan->xdma != NULL, ("xdma is NULL"));

	TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
		if (ih->cb != NULL) {
			if (ih->flags & XDMA_INTR_NET)
				NET_EPOCH_ENTER(et);
			ih->cb(ih->cb_user, status);
			if (ih->flags & XDMA_INTR_NET)
				NET_EPOCH_EXIT(et);
		}
	}

	if (xchan->flags & XCHAN_TYPE_SG)
		xdma_queue_submit(xchan);
}

#ifdef FDT
/*
 * Notify the DMA driver we have machine-dependent data in FDT.
 */
static int
xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
{
	uint32_t ret;

	ret = XDMA_OFW_MD_DATA(xdma->dma_dev,
	    cells, ncells, (void **)&xdma->data);

	return (ret);
}

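/*
 * Parse the "reg" property of an FDT memory node and add each region it
 * describes to the given vmem arena.
 */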
int
xdma_handle_mem_node(vmem_t *vmem, phandle_t memory)
{
	pcell_t reg[FDT_REG_CELLS * FDT_MEM_REGIONS];
	pcell_t *regp;
	int addr_cells, size_cells;
	int i, reg_len, ret, tuple_size, tuples;
	u_long mem_start, mem_size;

	if ((ret = fdt_addrsize_cells(OF_parent(memory), &addr_cells,
	    &size_cells)) != 0)
		return (ret);

	if (addr_cells > 2)
		return (ERANGE);

	tuple_size = sizeof(pcell_t) * (addr_cells + size_cells);
	reg_len = OF_getproplen(memory, "reg");
	if (reg_len <= 0 || reg_len > sizeof(reg))
		return (ERANGE);

	if (OF_getprop(memory, "reg", reg, reg_len) <= 0)
		return (ENXIO);

	tuples = reg_len / tuple_size;
	regp = (pcell_t *)&reg;
	for (i = 0; i < tuples; i++) {
		ret = fdt_data_to_res(regp, addr_cells, size_cells,
		    &mem_start, &mem_size);
		if (ret != 0)
			return (ret);

		vmem_add(vmem, mem_start, mem_size, 0);
		regp += addr_cells + size_cells;
	}

	return (0);
}

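/*
 * Create a vmem arena covering the device's "memory-region" reserved memory,
 * if any. Returns NULL when the property is absent or cannot be parsed.
 */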
vmem_t *
xdma_get_memory(device_t dev)
{
	phandle_t mem_node, node;
	pcell_t mem_handle;
	vmem_t *vmem;

	node = ofw_bus_get_node(dev);
	if (node <= 0) {
		device_printf(dev,
		    "%s called on a non-OFW-based device.\n", __func__);
		return (NULL);
	}

	if (!OF_hasprop(node, "memory-region"))
		return (NULL);

	if (OF_getencprop(node, "memory-region", (void *)&mem_handle,
	    sizeof(mem_handle)) <= 0)
		return (NULL);

	vmem = vmem_create("xDMA vmem", 0, 0, PAGE_SIZE,
	    PAGE_SIZE, M_BESTFIT | M_WAITOK);
	if (vmem == NULL)
		return (NULL);

	mem_node = OF_node_from_xref(mem_handle);
	if (xdma_handle_mem_node(vmem, mem_node) != 0) {
		vmem_destroy(vmem);
		return (NULL);
	}

	return (vmem);
}

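/*
 * Release a vmem arena obtained from xdma_get_memory().
 */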
void
xdma_put_memory(vmem_t *vmem)
{

	vmem_destroy(vmem);
}

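/*
 * Associate a vmem arena with the channel.
 */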
void
xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem)
{

	xchan->vmem = vmem;
}

/*
 * Allocate an xDMA controller for the DMA engine referenced by the device's
 * FDT "dmas"/"dma-names" properties.
 */
xdma_controller_t *
xdma_ofw_get(device_t dev, const char *prop)
{
	phandle_t node, parent;
	xdma_controller_t *xdma;
	device_t dma_dev;
	pcell_t *cells;
	int ncells;
	int error;
	int ndmas;
	int idx;

	node = ofw_bus_get_node(dev);
	if (node <= 0)
		device_printf(dev,
		    "%s called on a non-OFW-based device.\n", __func__);

	error = ofw_bus_parse_xref_list_get_length(node,
	    "dmas", "#dma-cells", &ndmas);
	if (error) {
		device_printf(dev,
		    "%s can't get dmas list.\n", __func__);
		return (NULL);
	}

	if (ndmas == 0) {
		device_printf(dev,
		    "%s dmas list is empty.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
	if (error != 0) {
		device_printf(dev,
		    "%s can't find string index.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
	    idx, &parent, &ncells, &cells);
	if (error != 0) {
		device_printf(dev,
		    "%s can't get dma device xref.\n", __func__);
		return (NULL);
	}

	dma_dev = OF_device_from_xref(parent);
	if (dma_dev == NULL) {
		device_printf(dev,
		    "%s can't get dma device.\n", __func__);
		free(cells, M_OFWPROP);
		return (NULL);
	}

	xdma = malloc(sizeof(struct xdma_controller),
	    M_XDMA, M_WAITOK | M_ZERO);
	xdma->dev = dev;
	xdma->dma_dev = dma_dev;

	TAILQ_INIT(&xdma->channels);

	xdma_ofw_md_data(xdma, cells, ncells);
	free(cells, M_OFWPROP);

	return (xdma);
}
#endif

/*
 * Allocate an xDMA controller for an explicitly supplied DMA device
 * (non-FDT attachment).
 */
xdma_controller_t *
xdma_get(device_t dev, device_t dma_dev)
{
	xdma_controller_t *xdma;

	xdma = malloc(sizeof(struct xdma_controller),
	    M_XDMA, M_WAITOK | M_ZERO);
	xdma->dev = dev;
	xdma->dma_dev = dma_dev;

	TAILQ_INIT(&xdma->channels);

	return (xdma);
}

/*
 * Free xDMA controller object.
 */
int
xdma_put(xdma_controller_t *xdma)
{

	XDMA_LOCK();

	/* Ensure no channels allocated. */
	if (!TAILQ_EMPTY(&xdma->channels)) {
		device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
		XDMA_UNLOCK();
		return (-1);
	}

	free(xdma->data, M_DEVBUF);
	free(xdma, M_XDMA);

	XDMA_UNLOCK();

	return (0);
}

static void
xdma_init(void)
{

	mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
}

SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);