/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

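/*
 * Scratch state handed to the bus_dmamap_load() family of callbacks:
 * collects the loaded segments (and any error) for the caller.
 */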
struct seg_load_request {
	struct bus_dma_segment *seg;
	uint32_t nsegs;
	uint32_t error;
};

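/*
 * Release the per-request buffers reserved from the channel's vmem
 * arena: unmap and free the KVA, then return the physical range.
 */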
static void
xchan_bufs_free_reserved(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	vm_size_t size;
	int i;

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = xr->buf.size;
		if (xr->buf.vaddr) {
			pmap_kremove_device(xr->buf.vaddr, size);
			kva_free(xr->buf.vaddr, size);
			xr->buf.vaddr = 0;
		}
		if (xr->buf.paddr) {
			vmem_free(xchan->vmem, xr->buf.paddr, size);
			xr->buf.paddr = 0;
		}
		xr->buf.size = 0;
	}
}

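/*
 * Reserve a physically contiguous buffer for each request from the
 * channel's vmem arena and map it into kernel virtual address space.
 */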
static int
xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	vmem_addr_t addr;
	vm_size_t size;
	int i;

	xdma = xchan->xdma;

	if (xchan->vmem == NULL)
		return (ENOBUFS);

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = round_page(xchan->maxsegsize);
		if (vmem_alloc(xchan->vmem, size,
		    M_BESTFIT | M_NOWAIT, &addr)) {
			device_printf(xdma->dev,
			    "%s: Can't allocate memory\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}

		xr->buf.size = size;
		xr->buf.paddr = addr;
		xr->buf.vaddr = kva_alloc(size);
		if (xr->buf.vaddr == 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate KVA\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}
		pmap_kenter_device(xr->buf.vaddr, size, addr);
	}

	return (0);
}

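/*
 * Create a bus_dma(9) tag describing the channel's addressing limits
 * and one DMA map per request for later bus_dmamap_load() calls.
 */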
static int
xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	int err;
	int i;

	xdma = xchan->xdma;

	/* Create bus_dma tag */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(xdma->dev),	/* Parent tag. */
	    xchan->alignment,		/* alignment */
	    xchan->boundary,		/* boundary */
	    xchan->lowaddr,		/* lowaddr */
	    xchan->highaddr,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
	    xchan->maxnsegs,		/* nsegments */
	    xchan->maxsegsize,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &xchan->dma_tag_bufs);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
		    &xr->buf.map);
		if (err != 0) {
			device_printf(xdma->dev,
			    "%s: Can't create buf DMA map.\n", __func__);

			/* Cleanup. */
			bus_dma_tag_destroy(xchan->dma_tag_bufs);

			return (-1);
		}
	}

	return (0);
}

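/*
 * Allocate per-request buffers for the channel, either through
 * bus_dma(9) or from the reserved vmem arena.
 */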
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	if (xdma == NULL) {
		/* Can't use device_printf() here: there is no device. */
		printf("%s: Channel was not allocated properly.\n", __func__);
		return (-1);
	}

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		ret = xchan_bufs_alloc_busdma(xchan);
	else
		ret = xchan_bufs_alloc_reserved(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate bufs.\n", __func__);
		return (-1);
	}

	xchan->flags |= XCHAN_BUFS_ALLOCATED;

	return (0);
}

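/*
 * Tear down whatever xchan_bufs_alloc() set up: destroy the per-request
 * DMA maps and tag, or return the reserved buffers to the vmem arena.
 */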
static int
xchan_bufs_free(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	struct xchan_buf *b;
	int i;

	if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
		return (-1);

	if (xchan->caps & XCHAN_CAP_BUSDMA) {
		for (i = 0; i < xchan->xr_num; i++) {
			xr = &xchan->xr_mem[i];
			b = &xr->buf;
			bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
		}
		bus_dma_tag_destroy(xchan->dma_tag_bufs);
	} else
		xchan_bufs_free_reserved(xchan);

	xchan->flags &= ~XCHAN_BUFS_ALLOCATED;

	return (0);
}

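/*
 * Release all scatter-gather resources owned by the channel.
 */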
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{

	xchan_bufs_free(xchan);
	xchan_sglist_free(xchan);
	xchan_bank_free(xchan);
}

/*
 * Prepare xchan for a scatter-gather transfer.
 * xr_num - size of the xdma request queue,
 * maxsegsize - maximum size of a scatter-gather list element in bytes,
 * maxnsegs - maximum number of segments per request,
 * alignment, boundary, lowaddr, highaddr - bus_dma(9) tag constraints
 * for the buffers backing the requests.
 */
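/*
 * A minimal usage sketch (hypothetical values, for illustration only):
 * a consumer moving mbuf-sized packets might prepare a channel with a
 * 64-entry request queue, MCLBYTES segments, and at most 8 segments
 * per request:
 *
 *	error = xdma_prep_sg(xchan, 64, MCLBYTES, 8, 16, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
 */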
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
    bus_size_t maxsegsize, bus_size_t maxnsegs,
    bus_size_t alignment, bus_addr_t boundary,
    bus_addr_t lowaddr, bus_addr_t highaddr)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	KASSERT(xdma != NULL, ("xdma is NULL"));

	if (xchan->flags & XCHAN_CONFIGURED) {
		device_printf(xdma->dev,
		    "%s: Channel is already configured.\n", __func__);
		return (-1);
	}

	xchan->xr_num = xr_num;
	xchan->maxsegsize = maxsegsize;
	xchan->maxnsegs = maxnsegs;
	xchan->alignment = alignment;
	xchan->boundary = boundary;
	xchan->lowaddr = lowaddr;
	xchan->highaddr = highaddr;

	if (xchan->maxnsegs > XDMA_MAX_SEG) {
		device_printf(xdma->dev, "%s: maxnsegs is too big\n",
		    __func__);
		return (-1);
	}

	xchan_bank_init(xchan);

	/* Allocate sglist. */
	ret = xchan_sglist_alloc(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate sglist.\n", __func__);
		return (-1);
	}

	/* Allocate buffers if required. */
	if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
		ret = xchan_bufs_alloc(xchan);
		if (ret != 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate bufs.\n", __func__);

			/* Cleanup */
			xchan_sglist_free(xchan);
			xchan_bank_free(xchan);

			return (-1);
		}
	}

	xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);

	XCHAN_LOCK(xchan);
	ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't prepare SG transfer.\n", __func__);
		XCHAN_UNLOCK(xchan);

		return (-1);
	}
	XCHAN_UNLOCK(xchan);

	return (0);
}

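/*
 * Called by the DMA engine driver when one segment of the request at
 * the head of the processing queue has completed.  Once the last
 * segment finishes, sync and unload the DMA map (or copy the data back
 * for reserved buffers) and move the request to the out queue.
 */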
void
xchan_seg_done(xdma_channel_t *xchan,
    struct xdma_transfer_status *st)
{
	struct xdma_request *xr;
	xdma_controller_t *xdma;
	struct xchan_buf *b;

	xdma = xchan->xdma;

	xr = TAILQ_FIRST(&xchan->processing);
	if (xr == NULL)
		panic("request not found");

	b = &xr->buf;

	/* Decrement atomically and act only on the final segment. */
	if (atomic_fetchadd_int(&b->nsegs_left, -1) == 1) {
		if (xchan->caps & XCHAN_CAP_BUSDMA) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTWRITE);
			else
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
		} else {
			if (xr->req_type == XR_TYPE_MBUF &&
			    xr->direction == XDMA_DEV_TO_MEM)
				m_copyback(xr->m, 0, st->transferred,
				    (void *)xr->buf.vaddr);
		}
		xr->status.error = st->error;
		xr->status.transferred = st->transferred;

		QUEUE_PROC_LOCK(xchan);
		TAILQ_REMOVE(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);

		QUEUE_OUT_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
		QUEUE_OUT_UNLOCK(xchan);
	}
}

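/*
 * bus_dmamap_load() callback: copy the resolved segments (or the
 * error) into the caller's seg_load_request.
 */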
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct seg_load_request *slr;
	struct bus_dma_segment *seg;
	int i;

	slr = arg;
	seg = slr->seg;

	if (error != 0) {
		slr->error = error;
		return;
	}

	slr->nsegs = nsegs;

	for (i = 0; i < nsegs; i++) {
		seg[i].ds_addr = segs[i].ds_addr;
		seg[i].ds_len = segs[i].ds_len;
	}
}

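/*
 * Load a request's data (mbuf, bio, or virtual address range) into its
 * DMA map and pre-sync it.  Returns the number of segments produced,
 * or 0 if the load failed and the caller should retry later.
 */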
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct seg_load_request slr;
	uint32_t nsegs;
	void *addr;
	int error;

	xdma = xchan->xdma;

	error = 0;
	nsegs = 0;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
		    xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
		break;
	case XR_TYPE_BIO:
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
		    xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	case XR_TYPE_VIRT:
		switch (xr->direction) {
		case XDMA_MEM_TO_DEV:
			addr = (void *)xr->src_addr;
			break;
		case XDMA_DEV_TO_MEM:
			addr = (void *)xr->dst_addr;
			break;
		default:
			device_printf(xdma->dma_dev,
			    "%s: Direction is not supported\n", __func__);
			return (0);
		}
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
		    addr, (xr->block_len * xr->block_num),
		    xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	default:
		break;
	}

	if (error != 0) {
		if (error == ENOMEM) {
			/*
			 * Out of memory. Try again later.
			 * TODO: count errors.
			 */
		} else
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed with err %d\n",
			    __func__, error);
		return (0);
	}

	if (xr->direction == XDMA_MEM_TO_DEV)
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREREAD);

	return (nsegs);
}

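/*
 * Non-busdma path: describe the request's data as a single segment,
 * bouncing mbuf payloads through the reserved buffer when the channel
 * owns one.  Only mbuf requests are handled so far.
 */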
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	struct mbuf *m;
	uint32_t nsegs;

	m = xr->m;

	nsegs = 1;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				m_copydata(m, 0, m->m_pkthdr.len,
				    (void *)xr->buf.vaddr);
			seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
		} else
			seg[0].ds_addr = mtod(m, bus_addr_t);
		seg[0].ds_len = m->m_pkthdr.len;
		break;
	case XR_TYPE_BIO:
	case XR_TYPE_VIRT:
	default:
		panic("%s: not implemented", __func__);
	}

	return (nsegs);
}

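/*
 * Load a request's data into the given segment array via the busdma
 * or reserved-buffer path and record the segment count in the buffer.
 */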
static int
xdma_load_data(xdma_channel_t *xchan,
    struct xdma_request *xr, struct bus_dma_segment *seg)
{
	int nsegs;

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		nsegs = _xdma_load_data_busdma(xchan, xr, seg);
	else
		nsegs = _xdma_load_data(xchan, xr, seg);
	if (nsegs == 0)
		return (0); /* Try again later. */

	xr->buf.nsegs = nsegs;
	xr->buf.nsegs_left = nsegs;

	return (nsegs);
}

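/*
 * Drain the input queue into the sglist, loading each request's data
 * until the DMA engine or the sglist runs out of room.  Returns the
 * number of sglist entries filled in, or -1 on error.
 */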
static int
xdma_process(xdma_channel_t *xchan,
    struct xdma_sglist *sg)
{
	struct bus_dma_segment seg[XDMA_MAX_SEG];
	struct xdma_request *xr;
	struct xdma_request *xr_tmp;
	xdma_controller_t *xdma;
	uint32_t capacity;
	uint32_t n;
	uint32_t c;
	int nsegs;
	int ret;

	XCHAN_ASSERT_LOCKED(xchan);

	xdma = xchan->xdma;

	n = 0;
	c = 0;

	ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't get DMA controller capacity.\n", __func__);
		return (-1);
	}

	TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
		switch (xr->req_type) {
		case XR_TYPE_MBUF:
			if ((xchan->caps & XCHAN_CAP_NOSEG) ||
			    (c > xchan->maxnsegs))
				c = xdma_mbuf_defrag(xchan, xr);
			break;
		case XR_TYPE_BIO:
		case XR_TYPE_VIRT:
		default:
			c = 1;
		}

		if (capacity <= (c + n)) {
			/*
			 * No space yet available for the entire
			 * request in the DMA engine.
			 */
			break;
		}

		if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
			/* Sglist is full. */
			break;
		}

		nsegs = xdma_load_data(xchan, xr, seg);
		if (nsegs == 0)
			break;

		xdma_sglist_add(&sg[n], seg, nsegs, xr);
		n += nsegs;

		QUEUE_IN_LOCK(xchan);
		TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
		QUEUE_IN_UNLOCK(xchan);

		QUEUE_PROC_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);
	}

	return (n);
}

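/*
 * Build an sglist from the channel's input queue and hand it to the
 * DMA engine driver.  Called with the channel lock held.
 */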
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
	struct xdma_sglist *sg;
	xdma_controller_t *xdma;
	int sg_n;
	int ret;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	XCHAN_ASSERT_LOCKED(xchan);

	sg = xchan->sg;

	if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0 &&
	    (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit a transfer: no bufs\n",
		    __func__);
		return (-1);
	}

	/*
	 * xdma_process() returns -1 on error; keep sg_n signed so an
	 * error is not silently submitted as a huge segment count.
	 */
	sg_n = xdma_process(xchan, sg);
	if (sg_n <= 0)
		return (sg_n); /* Nothing to submit, or error. */

	/* Now submit sglist to DMA engine driver. */
	ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit an sglist.\n", __func__);
		return (-1);
	}

	return (0);
}