/*-
 * Copyright (c) 2018 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sx.h>

#include <machine/bus.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

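/*
 * State shared with the busdma load callback: the callback copies the
 * resulting segments into 'seg' and reports their count and any error.
 */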
struct seg_load_request {
	struct bus_dma_segment *seg;
	uint32_t nsegs;
	uint32_t error;
};

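/*
 * Allocate buffers for the non-busdma case. Bounce buffers are not
 * implemented yet, so this is currently a placeholder.
 */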
static int
_xchan_bufs_alloc(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	int i;

	xdma = xchan->xdma;

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		/* TODO: bounce buffer */
	}

	return (0);
}

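/*
 * Create a busdma tag sized for a worst-case request (maxnsegs segments
 * of maxsegsize bytes each) and a DMA map for every request slot in the
 * channel.
 */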
static int
_xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	int err;
	int i;

	xdma = xchan->xdma;

	/* Create bus_dma tag. */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(xdma->dev),	/* Parent tag. */
	    xchan->alignment,		/* alignment */
	    xchan->boundary,		/* boundary */
	    xchan->lowaddr,		/* lowaddr */
	    xchan->highaddr,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
	    xchan->maxnsegs,		/* nsegments */
	    xchan->maxsegsize,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &xchan->dma_tag_bufs);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
		    &xr->buf.map);
		if (err != 0) {
			device_printf(xdma->dev,
			    "%s: Can't create buf DMA map.\n", __func__);

			/* Cleanup: destroy the maps created so far. */
			while (--i >= 0)
				bus_dmamap_destroy(xchan->dma_tag_bufs,
				    xchan->xr_mem[i].buf.map);
			bus_dma_tag_destroy(xchan->dma_tag_bufs);

			return (-1);
		}
	}

	return (0);
}

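/*
 * Allocate per-request buffers, using busdma when the channel
 * advertises XCHAN_CAP_BUSDMA.
 */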
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	if (xdma == NULL) {
		printf("%s: Channel was not allocated properly.\n", __func__);
		return (-1);
	}

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		ret = _xchan_bufs_alloc_busdma(xchan);
	else
		ret = _xchan_bufs_alloc(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate bufs.\n", __func__);
		return (-1);
	}

	xchan->flags |= XCHAN_BUFS_ALLOCATED;

	return (0);
}

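/*
 * Release the per-request DMA maps and the busdma tag created by
 * xchan_bufs_alloc().
 */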
static int
xchan_bufs_free(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	struct xchan_buf *b;
	int i;

	if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
		return (-1);

	if (xchan->caps & XCHAN_CAP_BUSDMA) {
		for (i = 0; i < xchan->xr_num; i++) {
			xr = &xchan->xr_mem[i];
			b = &xr->buf;
			bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
		}
		bus_dma_tag_destroy(xchan->dma_tag_bufs);
	} else {
		for (i = 0; i < xchan->xr_num; i++) {
			xr = &xchan->xr_mem[i];
			/* TODO: bounce buffer */
		}
	}

	xchan->flags &= ~XCHAN_BUFS_ALLOCATED;

	return (0);
}

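/*
 * Tear down the scatter-gather state of a channel: buffers, sglist
 * and the request bank.
 */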
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{

	xchan_bufs_free(xchan);
	xchan_sglist_free(xchan);
	xchan_bank_free(xchan);
}

/*
 * Prepare xchan for a scatter-gather transfer.
 * xr_num - size of the xdma request queue,
 * maxsegsize - maximum size in bytes of a scatter-gather list element,
 * maxnsegs - maximum number of elements in a scatter-gather list,
 * alignment, boundary, lowaddr, highaddr - busdma constraints for the
 * transfer buffers.
 */
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
    bus_size_t maxsegsize, bus_size_t maxnsegs,
    bus_size_t alignment, bus_addr_t boundary,
    bus_addr_t lowaddr, bus_addr_t highaddr)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	KASSERT(xdma != NULL, ("xdma is NULL"));

	if (xchan->flags & XCHAN_CONFIGURED) {
		device_printf(xdma->dev,
		    "%s: Channel is already configured.\n", __func__);
		return (-1);
	}

	xchan->xr_num = xr_num;
	xchan->maxsegsize = maxsegsize;
	xchan->maxnsegs = maxnsegs;
	xchan->alignment = alignment;
	xchan->boundary = boundary;
	xchan->lowaddr = lowaddr;
	xchan->highaddr = highaddr;

	if (xchan->maxnsegs > XDMA_MAX_SEG) {
		device_printf(xdma->dev, "%s: maxnsegs is too big\n",
		    __func__);
		return (-1);
	}

	xchan_bank_init(xchan);

	/* Allocate sglist. */
	ret = xchan_sglist_alloc(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate sglist.\n", __func__);
		return (-1);
	}

	/* Allocate buffers if required. */
	if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
		ret = xchan_bufs_alloc(xchan);
		if (ret != 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate bufs.\n", __func__);

			/* Cleanup. */
			xchan_sglist_free(xchan);
			xchan_bank_free(xchan);

			return (-1);
		}
	}

	xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);

	XCHAN_LOCK(xchan);
	ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't prepare SG transfer.\n", __func__);
		XCHAN_UNLOCK(xchan);

		return (-1);
	}
	XCHAN_UNLOCK(xchan);

	return (0);
}

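/*
 * Example (hypothetical driver values, for illustration only): a driver
 * keeping 64 requests in flight, each at most 8 segments of 4 KB with
 * 16-byte alignment, no boundary restriction and 32-bit addressing,
 * might configure its channel as
 *
 *	err = xdma_prep_sg(xchan, 64, 4096, 8, 16, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
 *
 * before enqueueing requests and calling xdma_queue_submit_sg().
 */

/*
 * Complete one segment of the request at the head of the processing
 * queue. When the last segment is done, sync and unload the request's
 * DMA map and move the request to queue_out.
 */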
void
xchan_seg_done(xdma_channel_t *xchan,
    struct xdma_transfer_status *st)
{
	struct xdma_request *xr;
	xdma_controller_t *xdma;
	struct xchan_buf *b;

	xdma = xchan->xdma;

	xr = TAILQ_FIRST(&xchan->processing);
	if (xr == NULL)
		panic("%s: request not found", __func__);

	b = &xr->buf;

	atomic_subtract_int(&b->nsegs_left, 1);

	if (b->nsegs_left == 0) {
		if (xchan->caps & XCHAN_CAP_BUSDMA) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTWRITE);
			else
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
		}
		xr->status.error = st->error;
		xr->status.transferred = st->transferred;

		QUEUE_PROC_LOCK(xchan);
		TAILQ_REMOVE(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);

		QUEUE_OUT_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
		QUEUE_OUT_UNLOCK(xchan);
	}
}

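/*
 * busdma load callback: record the segment array and count into the
 * seg_load_request passed as arg.
 */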
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct seg_load_request *slr;
	struct bus_dma_segment *seg;
	int i;

	slr = arg;
	seg = slr->seg;

	if (error != 0) {
		slr->error = error;
		return;
	}

	slr->nsegs = nsegs;

	for (i = 0; i < nsegs; i++) {
		seg[i].ds_addr = segs[i].ds_addr;
		seg[i].ds_len = segs[i].ds_len;
	}
}

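/*
 * Load the data of an mbuf, bio or virtually-addressed request into
 * the request's DMA map and return the number of segments produced
 * (0 on failure).
 */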
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct seg_load_request slr;
	uint32_t nsegs;
	void *addr;
	int error;

	xdma = xchan->xdma;

	error = 0;
	nsegs = 0;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
		    xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
		break;
	case XR_TYPE_BIO:
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
		    xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	case XR_TYPE_VIRT:
		switch (xr->direction) {
		case XDMA_MEM_TO_DEV:
			addr = (void *)xr->src_addr;
			break;
		case XDMA_DEV_TO_MEM:
			addr = (void *)xr->dst_addr;
			break;
		default:
			device_printf(xdma->dma_dev,
			    "%s: Direction is not supported\n", __func__);
			return (0);
		}
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
		    addr, (xr->block_len * xr->block_num),
		    xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	default:
		break;
	}

	if (error != 0) {
		if (error == ENOMEM) {
			/*
			 * Out of memory. Try again later.
			 * TODO: count errors.
			 */
		} else
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed with err %d\n",
			    __func__, error);
		return (0);
	}

	if (xr->direction == XDMA_MEM_TO_DEV)
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREREAD);

	return (nsegs);
}

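/*
 * Non-busdma load path: the buffer is assumed to be directly
 * addressable by the device, so an mbuf is described by a single
 * segment.
 */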
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct mbuf *m;
	uint32_t nsegs;

	xdma = xchan->xdma;

	m = xr->m;

	nsegs = 1;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		seg[0].ds_addr = mtod(m, bus_addr_t);
		seg[0].ds_len = m->m_pkthdr.len;
		break;
	case XR_TYPE_BIO:
	case XR_TYPE_VIRT:
	default:
		panic("%s: not implemented", __func__);
	}

	return (nsegs);
}

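/*
 * Fill 'seg' with the segments of a request and record the segment
 * count in the request's buffer state. Returns 0 if the request could
 * not be loaded and should be retried later.
 */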
static int
xdma_load_data(xdma_channel_t *xchan,
    struct xdma_request *xr, struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	int nsegs;

	xdma = xchan->xdma;

	nsegs = 0;

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		nsegs = _xdma_load_data_busdma(xchan, xr, seg);
	else
		nsegs = _xdma_load_data(xchan, xr, seg);
	if (nsegs == 0)
		return (0); /* Try again later. */

	xr->buf.nsegs = nsegs;
	xr->buf.nsegs_left = nsegs;

	return (nsegs);
}

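/*
 * Move as many requests as fit from queue_in to the processing queue,
 * loading each into the channel's sglist. Bounded by the controller
 * capacity and XDMA_SGLIST_MAXLEN. Returns the number of sglist
 * entries produced, or -1 if the capacity could not be queried.
 */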
static int
xdma_process(xdma_channel_t *xchan,
    struct xdma_sglist *sg)
{
	struct bus_dma_segment seg[XDMA_MAX_SEG];
	struct xdma_request *xr;
	struct xdma_request *xr_tmp;
	struct mbuf *m;
	xdma_controller_t *xdma;
	uint32_t capacity;
	uint32_t n;
	uint32_t c;
	int nsegs;
	int ret;

	XCHAN_ASSERT_LOCKED(xchan);

	xdma = xchan->xdma;

	n = 0;

	ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't get DMA controller capacity.\n", __func__);
		return (-1);
	}

	TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
		switch (xr->req_type) {
		case XR_TYPE_MBUF:
			/* Count the mbufs in the chain. */
			c = 0;
			for (m = xr->m; m != NULL; m = m->m_next)
				c++;
			/*
			 * Defragment if the channel can't do
			 * scatter-gather or the chain is too long.
			 */
			if ((xchan->caps & XCHAN_CAP_NOSEG) ||
			    (c > xchan->maxnsegs))
				c = xdma_mbuf_defrag(xchan, xr);
			break;
		case XR_TYPE_BIO:
		case XR_TYPE_VIRT:
		default:
			c = 1;
		}

		if (capacity <= (c + n)) {
			/*
			 * No space yet available for the entire
			 * request in the DMA engine.
			 */
			break;
		}

		if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
			/* Sglist is full. */
			break;
		}

		nsegs = xdma_load_data(xchan, xr, seg);
		if (nsegs == 0)
			break;

		xdma_sglist_add(&sg[n], seg, nsegs, xr);
		n += nsegs;

		QUEUE_IN_LOCK(xchan);
		TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
		QUEUE_IN_UNLOCK(xchan);

		QUEUE_PROC_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);
	}

	return (n);
}

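/*
 * Build an sglist from the pending requests and hand it to the DMA
 * engine driver. Called with the channel lock held.
 */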
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
	struct xdma_sglist *sg;
	xdma_controller_t *xdma;
	int sg_n;
	int ret;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	XCHAN_ASSERT_LOCKED(xchan);

	sg = xchan->sg;

	if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0 &&
	    (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit a transfer: no bufs\n",
		    __func__);
		return (-1);
	}

	sg_n = xdma_process(xchan, sg);
	if (sg_n < 0)
		return (-1);
	if (sg_n == 0)
		return (0); /* Nothing to submit. */

	/* Now submit the sglist to the DMA engine driver. */
	ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit an sglist.\n", __func__);
		return (-1);
	}

	return (0);
}
587