/*-
 * Copyright (c) 2016-2018 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Altera mSGDMA driver. */
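
/*
 * The engine is exposed through the xdma framework: a consumer driver
 * obtains a channel on this controller (e.g. via xdma_ofw_get() and
 * xdma_channel_alloc()) and then queues scatter-gather transfers that are
 * serviced by the xdma_channel_* methods implemented below.
 */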

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sglist.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/cache.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>
#include "xdma_if.h"
#include "opt_altera_msgdma.h"

#include <dev/altera/msgdma/msgdma.h>

#define MSGDMA_DEBUG
#undef MSGDMA_DEBUG

#ifdef MSGDMA_DEBUG
#define dprintf(fmt, ...)  printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif

#define	MSGDMA_NCHANNELS	1

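/*
 * Per-channel state.  Descriptors are kept in a software ring indexed by
 * idx_head (next slot to fill) and idx_tail (oldest outstanding slot); each
 * descriptor has its own bus_dma map and physical segment.
 */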
struct msgdma_channel {
	struct msgdma_softc	*sc;
	struct mtx		mtx;
	xdma_channel_t		*xchan;
	struct proc		*p;
	int			used;
	int			index;
	int			idx_head;
	int			idx_tail;

	struct msgdma_desc	**descs;
	bus_dma_segment_t	*descs_phys;
	uint32_t		descs_num;
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		*dma_map;
	uint32_t		map_descr;
	uint8_t			map_err;
	uint32_t		descs_used_count;
};

struct msgdma_softc {
	device_t		dev;
	struct resource		*res[3];
	bus_space_tag_t		bst;
	bus_space_handle_t	bsh;
	bus_space_tag_t		bst_d;
	bus_space_handle_t	bsh_d;
	void			*ih;
	struct msgdma_desc	desc;
	struct msgdma_channel	channels[MSGDMA_NCHANNELS];
};

static struct resource_spec msgdma_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

#define	HWTYPE_NONE	0
#define	HWTYPE_STD	1

static struct ofw_compat_data compat_data[] = {
	{ "altr,msgdma-16.0",	HWTYPE_STD },
	{ "altr,msgdma-1.0",	HWTYPE_STD },
	{ NULL,			HWTYPE_NONE },
};

static int msgdma_probe(device_t dev);
static int msgdma_attach(device_t dev);
static int msgdma_detach(device_t dev);

static inline uint32_t
msgdma_next_desc(struct msgdma_channel *chan, uint32_t curidx)
{

	return ((curidx + 1) % chan->descs_num);
}

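/*
 * Interrupt handler: walk the descriptor ring from idx_tail towards idx_head,
 * complete every descriptor the hardware has released (OWN bit cleared),
 * acknowledge the prefetcher IRQ and report the total bytes transferred to
 * the xdma framework.
 */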
static void
msgdma_intr(void *arg)
{
	xdma_transfer_status_t status;
	struct xdma_transfer_status st;
	struct msgdma_desc *desc;
	struct msgdma_channel *chan;
	struct xdma_channel *xchan;
	struct msgdma_softc *sc;
	uint32_t tot_copied;

	sc = arg;
	chan = &sc->channels[0];
	xchan = chan->xchan;

	dprintf("%s(%d): status 0x%08x next_descr 0x%08x, control 0x%08x\n",
	    __func__, device_get_unit(sc->dev),
	    READ4_DESC(sc, PF_STATUS),
	    READ4_DESC(sc, PF_NEXT_LO),
	    READ4_DESC(sc, PF_CONTROL));

	tot_copied = 0;

	while (chan->idx_tail != chan->idx_head) {
		dprintf("%s: idx_tail %d idx_head %d\n", __func__,
		    chan->idx_tail, chan->idx_head);
		bus_dmamap_sync(chan->dma_tag, chan->dma_map[chan->idx_tail],
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = chan->descs[chan->idx_tail];
		if ((le32toh(desc->control) & CONTROL_OWN) != 0) {
			break;
		}

		tot_copied += le32toh(desc->transferred);
		st.error = 0;
		st.transferred = le32toh(desc->transferred);
		xchan_seg_done(xchan, &st);

		chan->idx_tail = msgdma_next_desc(chan, chan->idx_tail);
		atomic_subtract_int(&chan->descs_used_count, 1);
	}

	WRITE4_DESC(sc, PF_STATUS, PF_STATUS_IRQ);

	/* Finish operation */
	status.error = 0;
	status.transferred = tot_copied;
	xdma_callback(chan->xchan, &status);
}

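/*
 * Software-reset the dispatcher and busy-wait until the core reports that
 * the reset sequence has completed.
 */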
static int
msgdma_reset(struct msgdma_softc *sc)
{
	int timeout;

	dprintf("%s: read status: %x\n", __func__, READ4(sc, 0x00));
	dprintf("%s: read control: %x\n", __func__, READ4(sc, 0x04));
	dprintf("%s: read 1: %x\n", __func__, READ4(sc, 0x08));
	dprintf("%s: read 2: %x\n", __func__, READ4(sc, 0x0C));

	WRITE4(sc, DMA_CONTROL, CONTROL_RESET);

	timeout = 100;
	do {
		if ((READ4(sc, DMA_STATUS) & STATUS_RESETTING) == 0)
			break;
	} while (timeout--);

	dprintf("timeout %d\n", timeout);

	/* On exhaustion the post-decrement leaves timeout at -1. */
	if (timeout < 0)
		return (-1);

	dprintf("%s: read control after reset: %x\n",
	    __func__, READ4(sc, DMA_CONTROL));

	return (0);
}

static int
msgdma_probe(device_t dev)
{
	int hwtype;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (hwtype == HWTYPE_NONE)
		return (ENXIO);

	device_set_desc(dev, "Altera mSGDMA");

	return (BUS_PROBE_DEFAULT);
}

static int
msgdma_attach(device_t dev)
{
	struct msgdma_softc *sc;
	phandle_t xref, node;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (bus_alloc_resources(dev, msgdma_spec, sc->res)) {
		device_printf(dev, "could not allocate resources for device\n");
		return (ENXIO);
	}

	/* CSR memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	/* Descriptor memory interface */
	sc->bst_d = rman_get_bustag(sc->res[1]);
	sc->bsh_d = rman_get_bushandle(sc->res[1]);

	/* Setup interrupt handler */
	err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, msgdma_intr, sc, &sc->ih);
	if (err) {
		device_printf(dev, "Unable to setup interrupt handler.\n");
		return (ENXIO);
	}

	node = ofw_bus_get_node(dev);
	xref = OF_xref_from_node(node);
	OF_device_register_xref(xref, dev);

	if (msgdma_reset(sc) != 0)
		return (ENXIO);

	WRITE4(sc, DMA_CONTROL, CONTROL_GIEM);

	return (0);
}

static int
msgdma_detach(device_t dev)
{
	struct msgdma_softc *sc;

	sc = device_get_softc(dev);

	return (0);
}

static void
msgdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	struct msgdma_channel *chan;

	chan = (struct msgdma_channel *)arg;
	KASSERT(chan != NULL, ("chan is NULL"));

	if (err) {
		chan->map_err = 1;
		return;
	}

	chan->descs_phys[chan->map_descr].ds_addr = segs[0].ds_addr;
	chan->descs_phys[chan->map_descr].ds_len = segs[0].ds_len;

	dprintf("map desc %d: descs phys %lx len %ld\n",
	    chan->map_descr, segs[0].ds_addr, segs[0].ds_len);
}

static int
msgdma_desc_free(struct msgdma_softc *sc, struct msgdma_channel *chan)
{
	struct msgdma_desc *desc;
	int nsegments;
	int i;

	nsegments = chan->descs_num;

	for (i = 0; i < nsegments; i++) {
		desc = chan->descs[i];
		bus_dmamap_unload(chan->dma_tag, chan->dma_map[i]);
		bus_dmamem_free(chan->dma_tag, desc, chan->dma_map[i]);
	}

	bus_dma_tag_destroy(chan->dma_tag);
	free(chan->descs, M_DEVBUF);
	free(chan->dma_map, M_DEVBUF);
	free(chan->descs_phys, M_DEVBUF);

	return (0);
}

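/*
 * Allocate the descriptor ring: create one bus_dma tag sized for a single
 * descriptor, then allocate and load a separate DMA buffer and map for each
 * of the chan->descs_num descriptors.  The physical address of every
 * descriptor is recorded in chan->descs_phys by msgdma_dmamap_cb().
 */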
331 msgdma_desc_alloc(struct msgdma_softc *sc, struct msgdma_channel *chan,
332     uint32_t desc_size, uint32_t align)
333 {
334 	int nsegments;
335 	int err;
336 	int i;
337 
338 	nsegments = chan->descs_num;
339 
340 	dprintf("%s: nseg %d\n", __func__, nsegments);
341 
342 	err = bus_dma_tag_create(
343 	    bus_get_dma_tag(sc->dev),
344 	    align, 0,			/* alignment, boundary */
345 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
346 	    BUS_SPACE_MAXADDR,		/* highaddr */
347 	    NULL, NULL,			/* filter, filterarg */
348 	    desc_size, 1,		/* maxsize, nsegments*/
349 	    desc_size, 0,		/* maxsegsize, flags */
350 	    NULL, NULL,			/* lockfunc, lockarg */
351 	    &chan->dma_tag);
352 	if (err) {
353 		device_printf(sc->dev,
354 		    "%s: Can't create bus_dma tag.\n", __func__);
355 		return (-1);
356 	}
357 
358 	/* Descriptors. */
359 	chan->descs = malloc(nsegments * sizeof(struct msgdma_desc *),
360 	    M_DEVBUF, (M_WAITOK | M_ZERO));
361 	if (chan->descs == NULL) {
362 		device_printf(sc->dev,
363 		    "%s: Can't allocate memory.\n", __func__);
364 		return (-1);
365 	}
366 	chan->dma_map = malloc(nsegments * sizeof(bus_dmamap_t),
367 	    M_DEVBUF, (M_WAITOK | M_ZERO));
368 	chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
369 	    M_DEVBUF, (M_WAITOK | M_ZERO));
370 
371 	/* Allocate bus_dma memory for each descriptor. */
372 	for (i = 0; i < nsegments; i++) {
373 		err = bus_dmamem_alloc(chan->dma_tag, (void **)&chan->descs[i],
374 		    BUS_DMA_WAITOK | BUS_DMA_ZERO, &chan->dma_map[i]);
375 		if (err) {
376 			device_printf(sc->dev,
377 			    "%s: Can't allocate memory for descriptors.\n",
378 			    __func__);
379 			return (-1);
380 		}
381 
382 		chan->map_err = 0;
383 		chan->map_descr = i;
384 		err = bus_dmamap_load(chan->dma_tag, chan->dma_map[i], chan->descs[i],
385 		    desc_size, msgdma_dmamap_cb, chan, BUS_DMA_WAITOK);
386 		if (err) {
387 			device_printf(sc->dev,
388 			    "%s: Can't load DMA map.\n", __func__);
389 			return (-1);
390 		}
391 
392 		if (chan->map_err != 0) {
393 			device_printf(sc->dev,
394 			    "%s: Can't load DMA map.\n", __func__);
395 			return (-1);
396 		}
397 	}
398 
399 	return (0);
400 }
401 
402 
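/*
 * xdma backend: hand out one of the (currently single) hardware channels.
 * The ring is sized to 1024 descriptors; busdma is used for buffer mapping
 * unless the channel sits behind an IOMMU.
 */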
static int
msgdma_channel_alloc(device_t dev, struct xdma_channel *xchan)
{
	struct msgdma_channel *chan;
	struct msgdma_softc *sc;
	int i;

	sc = device_get_softc(dev);

	for (i = 0; i < MSGDMA_NCHANNELS; i++) {
		chan = &sc->channels[i];
		if (chan->used == 0) {
			chan->xchan = xchan;
			xchan->chan = (void *)chan;
			if ((xchan->caps & XCHAN_CAP_IOMMU) == 0)
				xchan->caps |= XCHAN_CAP_BUSDMA;
			chan->index = i;
			chan->sc = sc;
			chan->used = 1;
			chan->idx_head = 0;
			chan->idx_tail = 0;
			chan->descs_used_count = 0;
			chan->descs_num = 1024;

			return (0);
		}
	}

	return (-1);
}

static int
msgdma_channel_free(device_t dev, struct xdma_channel *xchan)
{
	struct msgdma_channel *chan;
	struct msgdma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct msgdma_channel *)xchan->chan;

	msgdma_desc_free(sc, chan);

	chan->used = 0;

	return (0);
}

static int
msgdma_channel_capacity(device_t dev, xdma_channel_t *xchan,
    uint32_t *capacity)
{
	struct msgdma_channel *chan;
	uint32_t c;

	chan = (struct msgdma_channel *)xchan->chan;

	/* At least one descriptor must be left empty. */
	c = (chan->descs_num - chan->descs_used_count - 1);

	*capacity = c;

	return (0);
}

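/*
 * Fill one hardware descriptor per scatter-gather entry.  Start/end-of-packet
 * and IRQ bits are set according to the transfer direction, and ownership is
 * handed to the prefetcher (CONTROL_OWN | CONTROL_GO) only after the rest of
 * the descriptor has been written.
 */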
static int
msgdma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
    struct xdma_sglist *sg, uint32_t sg_n)
{
	struct msgdma_channel *chan;
	struct msgdma_desc *desc;
	struct msgdma_softc *sc;
	bus_addr_t src_addr_lo;
	bus_addr_t dst_addr_lo;
	uint32_t len;
	uint32_t tmp;
	int i;

	sc = device_get_softc(dev);

	chan = (struct msgdma_channel *)xchan->chan;

	for (i = 0; i < sg_n; i++) {
		src_addr_lo = sg[i].src_addr;
		dst_addr_lo = sg[i].dst_addr;
		len = (uint32_t)sg[i].len;

		dprintf("%s: src %jx dst %jx len %d\n", __func__,
		    (uintmax_t)src_addr_lo, (uintmax_t)dst_addr_lo, len);

		desc = chan->descs[chan->idx_head];
#if defined(ALTERA_MSGDMA_DESC_EXT) || defined(ALTERA_MSGDMA_DESC_PF_EXT)
		desc->read_hi = htole32(src_addr_lo >> 32);
		desc->write_hi = htole32(dst_addr_lo >> 32);
#endif
		desc->read_lo = htole32(src_addr_lo);
		desc->write_lo = htole32(dst_addr_lo);
		desc->length = htole32(len);
		desc->transferred = 0;
		desc->status = 0;
		desc->reserved = 0;
		desc->control = 0;

		if (sg[i].direction == XDMA_MEM_TO_DEV) {
			if (sg[i].first == 1) {
				desc->control |= htole32(CONTROL_GEN_SOP);
			}

			if (sg[i].last == 1) {
				desc->control |= htole32(CONTROL_GEN_EOP);
				desc->control |= htole32(CONTROL_TC_IRQ_EN |
				    CONTROL_ET_IRQ_EN | CONTROL_ERR_M);
			}
		} else {
			desc->control |= htole32(CONTROL_END_ON_EOP | (1 << 13));
			desc->control |= htole32(CONTROL_TC_IRQ_EN |
			    CONTROL_ET_IRQ_EN | CONTROL_ERR_M);
		}

		tmp = chan->idx_head;

		atomic_add_int(&chan->descs_used_count, 1);
		chan->idx_head = msgdma_next_desc(chan, chan->idx_head);

		desc->control |= htole32(CONTROL_OWN | CONTROL_GO);

		bus_dmamap_sync(chan->dma_tag, chan->dma_map[tmp],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	return (0);
}

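/*
 * Prepare the channel for scatter-gather operation: allocate the descriptor
 * ring, chain the descriptors into a circular list via their next pointers,
 * point the prefetcher at the first descriptor and start it with descriptor
 * polling enabled.
 */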
static int
msgdma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
{
	struct msgdma_channel *chan;
	struct msgdma_desc *desc;
	struct msgdma_softc *sc;
	uint32_t addr;
	uint32_t reg;
	int ret;
	int i;

	sc = device_get_softc(dev);

	dprintf("%s(%d)\n", __func__, device_get_unit(dev));

	chan = (struct msgdma_channel *)xchan->chan;

	ret = msgdma_desc_alloc(sc, chan, sizeof(struct msgdma_desc), 16);
	if (ret != 0) {
		device_printf(sc->dev,
		    "%s: Can't allocate descriptors.\n", __func__);
		return (-1);
	}

	for (i = 0; i < chan->descs_num; i++) {
		desc = chan->descs[i];

		if (i == (chan->descs_num - 1)) {
			desc->next = htole32(chan->descs_phys[0].ds_addr);
		} else {
			desc->next = htole32(chan->descs_phys[i+1].ds_addr);
		}

		dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
		    device_get_unit(dev), i, (uint64_t)desc, le32toh(desc->next));
	}

	/*
	 * Descriptors are allocated below 4GB (BUS_SPACE_MAXADDR_32BIT),
	 * so only the low 32 bits of the address need to be programmed.
	 */
	addr = chan->descs_phys[0].ds_addr;
	WRITE4_DESC(sc, PF_NEXT_LO, addr);
	WRITE4_DESC(sc, PF_NEXT_HI, 0);
	WRITE4_DESC(sc, PF_POLL_FREQ, 1000);

	reg = (PF_CONTROL_GIEM | PF_CONTROL_DESC_POLL_EN);
	reg |= PF_CONTROL_RUN;
	WRITE4_DESC(sc, PF_CONTROL, reg);

	return (0);
}

static int
msgdma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
{
	struct msgdma_channel *chan;
	struct msgdma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct msgdma_channel *)xchan->chan;

	switch (cmd) {
	case XDMA_CMD_BEGIN:
	case XDMA_CMD_TERMINATE:
	case XDMA_CMD_PAUSE:
		/* TODO: implement me */
		return (-1);
	}

	return (0);
}

#ifdef FDT
static int
msgdma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
{

	return (0);
}
#endif

static device_method_t msgdma_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			msgdma_probe),
	DEVMETHOD(device_attach,		msgdma_attach),
	DEVMETHOD(device_detach,		msgdma_detach),

	/* xDMA Interface */
	DEVMETHOD(xdma_channel_alloc,		msgdma_channel_alloc),
	DEVMETHOD(xdma_channel_free,		msgdma_channel_free),
	DEVMETHOD(xdma_channel_control,		msgdma_channel_control),

	/* xDMA SG Interface */
	DEVMETHOD(xdma_channel_capacity,	msgdma_channel_capacity),
	DEVMETHOD(xdma_channel_prep_sg,		msgdma_channel_prep_sg),
	DEVMETHOD(xdma_channel_submit_sg,	msgdma_channel_submit_sg),

#ifdef FDT
	DEVMETHOD(xdma_ofw_md_data,		msgdma_ofw_md_data),
#endif

	DEVMETHOD_END
};

static driver_t msgdma_driver = {
	"msgdma",
	msgdma_methods,
	sizeof(struct msgdma_softc),
};

static devclass_t msgdma_devclass;

EARLY_DRIVER_MODULE(msgdma, simplebus, msgdma_driver, msgdma_devclass, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
648