xref: /freebsd/sys/dev/altera/msgdma/msgdma.c (revision af23369a6deaaeb612ab266eb88b8bb8d560c322)
1 /*-
2  * Copyright (c) 2016-2018 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /* Altera mSGDMA driver. */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_platform.h"
37 #include <sys/param.h>
38 #include <sys/endian.h>
39 #include <sys/systm.h>
40 #include <sys/conf.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/kthread.h>
44 #include <sys/sglist.h>
45 #include <sys/module.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/resource.h>
49 #include <sys/rman.h>
50 
51 #include <machine/bus.h>
52 #include <machine/fdt.h>
53 #include <machine/cache.h>
54 
55 #ifdef FDT
56 #include <dev/fdt/fdt_common.h>
57 #include <dev/ofw/ofw_bus.h>
58 #include <dev/ofw/ofw_bus_subr.h>
59 #endif
60 
61 #include <dev/xdma/xdma.h>
62 #include "xdma_if.h"
63 #include "opt_altera_msgdma.h"
64 
65 #include <dev/altera/msgdma/msgdma.h>
66 
67 #define MSGDMA_DEBUG
68 #undef MSGDMA_DEBUG
69 
70 #ifdef MSGDMA_DEBUG
71 #define dprintf(fmt, ...)  printf(fmt, ##__VA_ARGS__)
72 #else
73 #define dprintf(fmt, ...)
74 #endif
75 
76 #define	MSGDMA_NCHANNELS	1
77 
/*
 * Per-channel software state: ring-buffer bookkeeping for the descriptor
 * chain plus the bus_dma resources that back it.
 */
struct msgdma_channel {
	struct msgdma_softc	*sc;	/* Parent controller softc. */
	struct mtx		mtx;
	xdma_channel_t		*xchan;	/* Associated xDMA framework channel. */
	struct proc		*p;
	int			used;	/* Non-zero once claimed by channel_alloc. */
	int			index;	/* Channel number within sc->channels. */
	int			idx_head;	/* Next ring slot to submit into. */
	int			idx_tail;	/* Next ring slot to reap in the ISR. */

	struct msgdma_desc	**descs;	/* KVA of each descriptor. */
	bus_dma_segment_t	*descs_phys;	/* Bus address of each descriptor. */
	uint32_t		descs_num;	/* Ring size (set in channel_alloc). */
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		*dma_map;	/* One map per descriptor. */
	uint32_t		map_descr;	/* Index being loaded; read by dmamap_cb. */
	uint8_t			map_err;	/* Set by dmamap_cb on load failure. */
	uint32_t		descs_used_count; /* Descriptors currently in flight. */
};
97 
/* Per-device software state. */
struct msgdma_softc {
	device_t		dev;
	struct resource		*res[3];	/* CSR mem, descriptor mem, IRQ. */
	bus_space_tag_t		bst;	/* CSR register window. */
	bus_space_handle_t	bsh;
	bus_space_tag_t		bst_d;	/* Descriptor/prefetcher register window. */
	bus_space_handle_t	bsh_d;
	void			*ih;	/* Interrupt handler cookie. */
	struct msgdma_desc	desc;
	struct msgdma_channel	channels[MSGDMA_NCHANNELS];
};
109 
static struct resource_spec msgdma_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* CSR interface. */
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* Descriptor interface. */
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};
116 
/* Hardware types matched against the FDT "compatible" property. */
#define	HWTYPE_NONE	0
#define	HWTYPE_STD	1

static struct ofw_compat_data compat_data[] = {
	{ "altr,msgdma-16.0",	HWTYPE_STD },
	{ "altr,msgdma-1.0",	HWTYPE_STD },
	{ NULL,			HWTYPE_NONE },
};
125 
126 static int msgdma_probe(device_t dev);
127 static int msgdma_attach(device_t dev);
128 static int msgdma_detach(device_t dev);
129 
130 static inline uint32_t
131 msgdma_next_desc(struct msgdma_channel *chan, uint32_t curidx)
132 {
133 
134 	return ((curidx + 1) % chan->descs_num);
135 }
136 
/*
 * Interrupt handler.  Reaps completed descriptors from the tail of the
 * ring, reports each finished segment to the xDMA framework, clears the
 * prefetcher IRQ status and finally invokes the channel completion
 * callback with the total byte count transferred.
 */
static void
msgdma_intr(void *arg)
{
	xdma_transfer_status_t status;
	struct xdma_transfer_status st;
	struct msgdma_desc *desc;
	struct msgdma_channel *chan;
	struct xdma_channel *xchan;
	struct msgdma_softc *sc;
	uint32_t tot_copied;

	sc = arg;
	/* Only one hardware channel exists (MSGDMA_NCHANNELS == 1). */
	chan = &sc->channels[0];
	xchan = chan->xchan;

	dprintf("%s(%d): status 0x%08x next_descr 0x%08x, control 0x%08x\n",
	    __func__, device_get_unit(sc->dev),
		READ4_DESC(sc, PF_STATUS),
		READ4_DESC(sc, PF_NEXT_LO),
		READ4_DESC(sc, PF_CONTROL));

	tot_copied = 0;

	/*
	 * Walk from tail toward head, stopping at the first descriptor the
	 * hardware still owns (CONTROL_OWN set by submit_sg, presumably
	 * cleared by the engine on completion — confirm against HW docs).
	 */
	while (chan->idx_tail != chan->idx_head) {
		dprintf("%s: idx_tail %d idx_head %d\n", __func__,
		    chan->idx_tail, chan->idx_head);
		/* Make the CPU view of the descriptor coherent before reading. */
		bus_dmamap_sync(chan->dma_tag, chan->dma_map[chan->idx_tail],
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = chan->descs[chan->idx_tail];
		if ((le32toh(desc->control) & CONTROL_OWN) != 0) {
			break;
		}

		/* Descriptor fields are little-endian in memory. */
		tot_copied += le32toh(desc->transferred);
		st.error = 0;
		st.transferred = le32toh(desc->transferred);
		xchan_seg_done(xchan, &st);

		chan->idx_tail = msgdma_next_desc(chan, chan->idx_tail);
		atomic_subtract_int(&chan->descs_used_count, 1);
	}

	/* Clear the prefetcher IRQ status bit. */
	WRITE4_DESC(sc, PF_STATUS, PF_STATUS_IRQ);

	/* Finish operation */
	status.error = 0;
	status.transferred = tot_copied;
	xdma_callback(chan->xchan, &status);
}
187 
188 static int
189 msgdma_reset(struct msgdma_softc *sc)
190 {
191 	int timeout;
192 
193 	dprintf("%s: read status: %x\n", __func__, READ4(sc, 0x00));
194 	dprintf("%s: read control: %x\n", __func__, READ4(sc, 0x04));
195 	dprintf("%s: read 1: %x\n", __func__, READ4(sc, 0x08));
196 	dprintf("%s: read 2: %x\n", __func__, READ4(sc, 0x0C));
197 
198 	WRITE4(sc, DMA_CONTROL, CONTROL_RESET);
199 
200 	timeout = 100;
201 	do {
202 		if ((READ4(sc, DMA_STATUS) & STATUS_RESETTING) == 0)
203 			break;
204 	} while (timeout--);
205 
206 	dprintf("timeout %d\n", timeout);
207 
208 	if (timeout == 0)
209 		return (-1);
210 
211 	dprintf("%s: read control after reset: %x\n",
212 	    __func__, READ4(sc, DMA_CONTROL));
213 
214 	return (0);
215 }
216 
217 static int
218 msgdma_probe(device_t dev)
219 {
220 	int hwtype;
221 
222 	if (!ofw_bus_status_okay(dev))
223 		return (ENXIO);
224 
225 	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
226 	if (hwtype == HWTYPE_NONE)
227 		return (ENXIO);
228 
229 	device_set_desc(dev, "Altera mSGDMA");
230 
231 	return (BUS_PROBE_DEFAULT);
232 }
233 
234 static int
235 msgdma_attach(device_t dev)
236 {
237 	struct msgdma_softc *sc;
238 	phandle_t xref, node;
239 	int err;
240 
241 	sc = device_get_softc(dev);
242 	sc->dev = dev;
243 
244 	if (bus_alloc_resources(dev, msgdma_spec, sc->res)) {
245 		device_printf(dev, "could not allocate resources for device\n");
246 		return (ENXIO);
247 	}
248 
249 	/* CSR memory interface */
250 	sc->bst = rman_get_bustag(sc->res[0]);
251 	sc->bsh = rman_get_bushandle(sc->res[0]);
252 
253 	/* Descriptor memory interface */
254 	sc->bst_d = rman_get_bustag(sc->res[1]);
255 	sc->bsh_d = rman_get_bushandle(sc->res[1]);
256 
257 	/* Setup interrupt handler */
258 	err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
259 	    NULL, msgdma_intr, sc, &sc->ih);
260 	if (err) {
261 		device_printf(dev, "Unable to alloc interrupt resource.\n");
262 		return (ENXIO);
263 	}
264 
265 	node = ofw_bus_get_node(dev);
266 	xref = OF_xref_from_node(node);
267 	OF_device_register_xref(xref, dev);
268 
269 	if (msgdma_reset(sc) != 0)
270 		return (-1);
271 
272 	WRITE4(sc, DMA_CONTROL, CONTROL_GIEM);
273 
274 	return (0);
275 }
276 
277 static int
278 msgdma_detach(device_t dev)
279 {
280 	struct msgdma_softc *sc;
281 
282 	sc = device_get_softc(dev);
283 
284 	return (0);
285 }
286 
287 static void
288 msgdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
289 {
290 	struct msgdma_channel *chan;
291 
292 	chan = (struct msgdma_channel *)arg;
293 	KASSERT(chan != NULL, ("xchan is NULL"));
294 
295 	if (err) {
296 		chan->map_err = 1;
297 		return;
298 	}
299 
300 	chan->descs_phys[chan->map_descr].ds_addr = segs[0].ds_addr;
301 	chan->descs_phys[chan->map_descr].ds_len = segs[0].ds_len;
302 
303 	dprintf("map desc %d: descs phys %lx len %ld\n",
304 	    chan->map_descr, segs[0].ds_addr, segs[0].ds_len);
305 }
306 
307 static int
308 msgdma_desc_free(struct msgdma_softc *sc, struct msgdma_channel *chan)
309 {
310 	struct msgdma_desc *desc;
311 	int nsegments;
312 	int i;
313 
314 	nsegments = chan->descs_num;
315 
316 	for (i = 0; i < nsegments; i++) {
317 		desc = chan->descs[i];
318 		bus_dmamap_unload(chan->dma_tag, chan->dma_map[i]);
319 		bus_dmamem_free(chan->dma_tag, desc, chan->dma_map[i]);
320 	}
321 
322 	bus_dma_tag_destroy(chan->dma_tag);
323 	free(chan->descs, M_DEVBUF);
324 	free(chan->dma_map, M_DEVBUF);
325 	free(chan->descs_phys, M_DEVBUF);
326 
327 	return (0);
328 }
329 
330 static int
331 msgdma_desc_alloc(struct msgdma_softc *sc, struct msgdma_channel *chan,
332     uint32_t desc_size, uint32_t align)
333 {
334 	int nsegments;
335 	int err;
336 	int i;
337 
338 	nsegments = chan->descs_num;
339 
340 	dprintf("%s: nseg %d\n", __func__, nsegments);
341 
342 	err = bus_dma_tag_create(
343 	    bus_get_dma_tag(sc->dev),
344 	    align, 0,			/* alignment, boundary */
345 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
346 	    BUS_SPACE_MAXADDR,		/* highaddr */
347 	    NULL, NULL,			/* filter, filterarg */
348 	    desc_size, 1,		/* maxsize, nsegments*/
349 	    desc_size, 0,		/* maxsegsize, flags */
350 	    NULL, NULL,			/* lockfunc, lockarg */
351 	    &chan->dma_tag);
352 	if (err) {
353 		device_printf(sc->dev,
354 		    "%s: Can't create bus_dma tag.\n", __func__);
355 		return (-1);
356 	}
357 
358 	/* Descriptors. */
359 	chan->descs = malloc(nsegments * sizeof(struct msgdma_desc *),
360 	    M_DEVBUF, (M_WAITOK | M_ZERO));
361 	if (chan->descs == NULL) {
362 		device_printf(sc->dev,
363 		    "%s: Can't allocate memory.\n", __func__);
364 		return (-1);
365 	}
366 	chan->dma_map = malloc(nsegments * sizeof(bus_dmamap_t),
367 	    M_DEVBUF, (M_WAITOK | M_ZERO));
368 	chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
369 	    M_DEVBUF, (M_WAITOK | M_ZERO));
370 
371 	/* Allocate bus_dma memory for each descriptor. */
372 	for (i = 0; i < nsegments; i++) {
373 		err = bus_dmamem_alloc(chan->dma_tag, (void **)&chan->descs[i],
374 		    BUS_DMA_WAITOK | BUS_DMA_ZERO, &chan->dma_map[i]);
375 		if (err) {
376 			device_printf(sc->dev,
377 			    "%s: Can't allocate memory for descriptors.\n",
378 			    __func__);
379 			return (-1);
380 		}
381 
382 		chan->map_err = 0;
383 		chan->map_descr = i;
384 		err = bus_dmamap_load(chan->dma_tag, chan->dma_map[i], chan->descs[i],
385 		    desc_size, msgdma_dmamap_cb, chan, BUS_DMA_WAITOK);
386 		if (err) {
387 			device_printf(sc->dev,
388 			    "%s: Can't load DMA map.\n", __func__);
389 			return (-1);
390 		}
391 
392 		if (chan->map_err != 0) {
393 			device_printf(sc->dev,
394 			    "%s: Can't load DMA map.\n", __func__);
395 			return (-1);
396 		}
397 	}
398 
399 	return (0);
400 }
401 
402 static int
403 msgdma_channel_alloc(device_t dev, struct xdma_channel *xchan)
404 {
405 	struct msgdma_channel *chan;
406 	struct msgdma_softc *sc;
407 	int i;
408 
409 	sc = device_get_softc(dev);
410 
411 	for (i = 0; i < MSGDMA_NCHANNELS; i++) {
412 		chan = &sc->channels[i];
413 		if (chan->used == 0) {
414 			chan->xchan = xchan;
415 			xchan->chan = (void *)chan;
416 			if ((xchan->caps & XCHAN_CAP_IOMMU) == 0)
417 				xchan->caps |= XCHAN_CAP_BUSDMA;
418 			chan->index = i;
419 			chan->sc = sc;
420 			chan->used = 1;
421 			chan->idx_head = 0;
422 			chan->idx_tail = 0;
423 			chan->descs_used_count = 0;
424 			chan->descs_num = 1024;
425 
426 			return (0);
427 		}
428 	}
429 
430 	return (-1);
431 }
432 
433 static int
434 msgdma_channel_free(device_t dev, struct xdma_channel *xchan)
435 {
436 	struct msgdma_channel *chan;
437 	struct msgdma_softc *sc;
438 
439 	sc = device_get_softc(dev);
440 
441 	chan = (struct msgdma_channel *)xchan->chan;
442 
443 	msgdma_desc_free(sc, chan);
444 
445 	chan->used = 0;
446 
447 	return (0);
448 }
449 
450 static int
451 msgdma_channel_capacity(device_t dev, xdma_channel_t *xchan,
452     uint32_t *capacity)
453 {
454 	struct msgdma_channel *chan;
455 	uint32_t c;
456 
457 	chan = (struct msgdma_channel *)xchan->chan;
458 
459 	/* At least one descriptor must be left empty. */
460 	c = (chan->descs_num - chan->descs_used_count - 1);
461 
462 	*capacity = c;
463 
464 	return (0);
465 }
466 
/*
 * Fill hardware descriptors at the ring head for each scatter/gather
 * entry.  Statement order matters: all descriptor fields are written
 * first, the OWN/GO bits are set last, and the map is synced before the
 * hardware may fetch the descriptor.
 */
static int
msgdma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
    struct xdma_sglist *sg, uint32_t sg_n)
{
	struct msgdma_channel *chan;
	struct msgdma_desc *desc;
	struct msgdma_softc *sc;
	bus_addr_t src_addr_lo;
	bus_addr_t dst_addr_lo;
	uint32_t len;
	uint32_t tmp;
	int i;

	sc = device_get_softc(dev);

	chan = (struct msgdma_channel *)xchan->chan;

	for (i = 0; i < sg_n; i++) {
		src_addr_lo = sg[i].src_addr;
		dst_addr_lo = sg[i].dst_addr;
		len = (uint32_t)sg[i].len;

		dprintf("%s: src %x dst %x len %d\n", __func__,
		    src_addr_lo, dst_addr_lo, len);

		desc = chan->descs[chan->idx_head];
		/* Extended descriptors carry the upper 32 address bits. */
#if defined(ALTERA_MSGDMA_DESC_EXT) || defined(ALTERA_MSGDMA_DESC_PF_EXT)
		desc->read_hi = htole32(src_addr_lo >> 32);
		desc->write_hi = htole32(dst_addr_lo >> 32);
#endif
		/* Descriptor fields are little-endian in memory. */
		desc->read_lo = htole32(src_addr_lo);
		desc->write_lo = htole32(dst_addr_lo);
		desc->length = htole32(len);
		desc->transferred = 0;
		desc->status = 0;
		desc->reserved = 0;
		desc->control = 0;

		if (sg[i].direction == XDMA_MEM_TO_DEV) {
			/* TX: generate SOP/EOP around the packet, IRQ on last. */
			if (sg[i].first == 1) {
				desc->control |= htole32(CONTROL_GEN_SOP);
			}

			if (sg[i].last == 1) {
				desc->control |= htole32(CONTROL_GEN_EOP);
				desc->control |= htole32(CONTROL_TC_IRQ_EN |
				    CONTROL_ET_IRQ_EN | CONTROL_ERR_M);
			}
		} else {
			/*
			 * RX: stop on EOP and interrupt.  NOTE(review):
			 * (1 << 13) is an undocumented control bit here —
			 * confirm its meaning against the mSGDMA datasheet.
			 */
			desc->control |= htole32(CONTROL_END_ON_EOP | (1 << 13));
			desc->control |= htole32(CONTROL_TC_IRQ_EN |
			    CONTROL_ET_IRQ_EN | CONTROL_ERR_M);
		}

		tmp = chan->idx_head;

		atomic_add_int(&chan->descs_used_count, 1);
		chan->idx_head = msgdma_next_desc(chan, chan->idx_head);

		/* Hand the descriptor to the hardware only after it is complete. */
		desc->control |= htole32(CONTROL_OWN | CONTROL_GO);

		bus_dmamap_sync(chan->dma_tag, chan->dma_map[tmp],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
534 
/*
 * Prepare a channel for scatter/gather operation: allocate the
 * descriptor ring, link the descriptors into a circular list via their
 * `next` pointers, then point the prefetcher at the first descriptor
 * and start it in polling mode.
 */
static int
msgdma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
{
	struct msgdma_channel *chan;
	struct msgdma_desc *desc;
	struct msgdma_softc *sc;
	uint32_t addr;
	uint32_t reg;
	int ret;
	int i;

	sc = device_get_softc(dev);

	dprintf("%s(%d)\n", __func__, device_get_unit(dev));

	chan = (struct msgdma_channel *)xchan->chan;

	/* 16-byte alignment for each descriptor. */
	ret = msgdma_desc_alloc(sc, chan, sizeof(struct msgdma_desc), 16);
	if (ret != 0) {
		device_printf(sc->dev,
		    "%s: Can't allocate descriptors.\n", __func__);
		return (-1);
	}

	/* Chain the descriptors into a ring; the last wraps to the first. */
	for (i = 0; i < chan->descs_num; i++) {
		desc = chan->descs[i];

		if (i == (chan->descs_num - 1)) {
			desc->next = htole32(chan->descs_phys[0].ds_addr);
		} else {
			desc->next = htole32(chan->descs_phys[i+1].ds_addr);
		}

		dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
		    device_get_unit(dev), i, (uint64_t)desc, le32toh(desc->next));
	}

	/* Point the prefetcher at the head of the ring. */
	addr = chan->descs_phys[0].ds_addr;
	WRITE4_DESC(sc, PF_NEXT_LO, addr);
	WRITE4_DESC(sc, PF_NEXT_HI, 0);
	WRITE4_DESC(sc, PF_POLL_FREQ, 1000);

	/* Enable prefetcher interrupts and descriptor polling, then run. */
	reg = (PF_CONTROL_GIEM | PF_CONTROL_DESC_POLL_EN);
	reg |= PF_CONTROL_RUN;
	WRITE4_DESC(sc, PF_CONTROL, reg);

	return (0);
}
583 
584 static int
585 msgdma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
586 {
587 	struct msgdma_channel *chan;
588 	struct msgdma_softc *sc;
589 
590 	sc = device_get_softc(dev);
591 
592 	chan = (struct msgdma_channel *)xchan->chan;
593 
594 	switch (cmd) {
595 	case XDMA_CMD_BEGIN:
596 	case XDMA_CMD_TERMINATE:
597 	case XDMA_CMD_PAUSE:
598 		/* TODO: implement me */
599 		return (-1);
600 	}
601 
602 	return (0);
603 }
604 
605 #ifdef FDT
/*
 * Parse driver-specific cells from the FDT "dmas" specifier.  This
 * controller encodes no extra metadata, so there is nothing to do.
 */
static int
msgdma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
{

	return (0);
}
612 #endif
613 
/* Method table: newbus device interface plus the xDMA backend hooks. */
static device_method_t msgdma_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			msgdma_probe),
	DEVMETHOD(device_attach,		msgdma_attach),
	DEVMETHOD(device_detach,		msgdma_detach),

	/* xDMA Interface */
	DEVMETHOD(xdma_channel_alloc,		msgdma_channel_alloc),
	DEVMETHOD(xdma_channel_free,		msgdma_channel_free),
	DEVMETHOD(xdma_channel_control,		msgdma_channel_control),

	/* xDMA SG Interface */
	DEVMETHOD(xdma_channel_capacity,	msgdma_channel_capacity),
	DEVMETHOD(xdma_channel_prep_sg,		msgdma_channel_prep_sg),
	DEVMETHOD(xdma_channel_submit_sg,	msgdma_channel_submit_sg),

#ifdef FDT
	DEVMETHOD(xdma_ofw_md_data,		msgdma_ofw_md_data),
#endif

	DEVMETHOD_END
};
636 
static driver_t msgdma_driver = {
	"msgdma",
	msgdma_methods,
	sizeof(struct msgdma_softc),
};

/* Register early (interrupt pass, late order) on the simplebus. */
EARLY_DRIVER_MODULE(msgdma, simplebus, msgdma_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
645