/*-
 * Copyright (c) 2016-2018 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Altera mSGDMA driver. */

#include <sys/cdefs.h>
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sglist.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/cache.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>
#include "xdma_if.h"
#include "opt_altera_msgdma.h"

#include <dev/altera/msgdma/msgdma.h>

#define MSGDMA_DEBUG
#undef MSGDMA_DEBUG

#ifdef MSGDMA_DEBUG
#define dprintf(fmt, ...)  printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif

#define	MSGDMA_NCHANNELS	1

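/*
 * Per-channel software state.  The channel owns a circular ring of
 * descs_num hardware descriptors: idx_head is where new segments are
 * queued by msgdma_channel_submit_sg() and idx_tail is where completed
 * descriptors are reaped by the interrupt handler.  Each descriptor has
 * its own bus_dma map so it can be loaded and synced individually.
 */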
struct msgdma_channel {
	struct msgdma_softc	*sc;
	struct mtx		mtx;
	xdma_channel_t		*xchan;
	struct proc		*p;
	int			used;
	int			index;
	int			idx_head;
	int			idx_tail;

	struct msgdma_desc	**descs;
	bus_dma_segment_t	*descs_phys;
	uint32_t		descs_num;
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		*dma_map;
	uint32_t		map_descr;
	uint8_t			map_err;
	uint32_t		descs_used_count;
};

struct msgdma_softc {
	device_t		dev;
	struct resource		*res[3];
	bus_space_tag_t		bst;
	bus_space_handle_t	bsh;
	bus_space_tag_t		bst_d;
	bus_space_handle_t	bsh_d;
	void			*ih;
	struct msgdma_desc	desc;
	struct msgdma_channel	channels[MSGDMA_NCHANNELS];
};

static struct resource_spec msgdma_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

#define	HWTYPE_NONE	0
#define	HWTYPE_STD	1

static struct ofw_compat_data compat_data[] = {
	{ "altr,msgdma-16.0",	HWTYPE_STD },
	{ "altr,msgdma-1.0",	HWTYPE_STD },
	{ NULL,			HWTYPE_NONE },
};

static int msgdma_probe(device_t dev);
static int msgdma_attach(device_t dev);
static int msgdma_detach(device_t dev);

static inline uint32_t
msgdma_next_desc(struct msgdma_channel *chan, uint32_t curidx)
{

	return ((curidx + 1) % chan->descs_num);
}

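/*
 * Interrupt handler.  Walk the descriptor ring from idx_tail towards
 * idx_head, reaping every descriptor the hardware has handed back (the
 * OWN bit in the control word is clear), accumulate the transferred
 * byte counts, clear the prefetcher IRQ status and pass the totals to
 * the xdma framework via xdma_callback().
 */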
static void
msgdma_intr(void *arg)
{
	xdma_transfer_status_t status;
	struct xdma_transfer_status st;
	struct msgdma_desc *desc;
	struct msgdma_channel *chan;
	struct xdma_channel *xchan;
	struct msgdma_softc *sc;
	uint32_t tot_copied;

	sc = arg;
	chan = &sc->channels[0];
	xchan = chan->xchan;

	dprintf("%s(%d): status 0x%08x next_descr 0x%08x, control 0x%08x\n",
	    __func__, device_get_unit(sc->dev),
	    READ4_DESC(sc, PF_STATUS),
	    READ4_DESC(sc, PF_NEXT_LO),
	    READ4_DESC(sc, PF_CONTROL));

	tot_copied = 0;

	while (chan->idx_tail != chan->idx_head) {
		dprintf("%s: idx_tail %d idx_head %d\n", __func__,
		    chan->idx_tail, chan->idx_head);
		bus_dmamap_sync(chan->dma_tag, chan->dma_map[chan->idx_tail],
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = chan->descs[chan->idx_tail];
		if ((le32toh(desc->control) & CONTROL_OWN) != 0)
			break;

		tot_copied += le32toh(desc->transferred);
		st.error = 0;
		st.transferred = le32toh(desc->transferred);
		xchan_seg_done(xchan, &st);

		chan->idx_tail = msgdma_next_desc(chan, chan->idx_tail);
		atomic_subtract_int(&chan->descs_used_count, 1);
	}

	WRITE4_DESC(sc, PF_STATUS, PF_STATUS_IRQ);

	/* Finish operation */
	status.error = 0;
	status.transferred = tot_copied;
	xdma_callback(chan->xchan, &status);
}

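/*
 * Put the engine into reset via the CSR and poll until the RESETTING
 * status bit clears.  Returns 0 on success, -1 if the core never comes
 * out of reset.
 */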
static int
msgdma_reset(struct msgdma_softc *sc)
{
	int timeout;

	dprintf("%s: read status: %x\n", __func__, READ4(sc, 0x00));
	dprintf("%s: read control: %x\n", __func__, READ4(sc, 0x04));
	dprintf("%s: read 1: %x\n", __func__, READ4(sc, 0x08));
	dprintf("%s: read 2: %x\n", __func__, READ4(sc, 0x0C));

	WRITE4(sc, DMA_CONTROL, CONTROL_RESET);

	timeout = 100;
	do {
		if ((READ4(sc, DMA_STATUS) & STATUS_RESETTING) == 0)
			break;
	} while (timeout--);

	dprintf("timeout %d\n", timeout);

	/* The post-decrement leaves timeout at -1 once the loop expires. */
	if (timeout < 0)
		return (-1);

	dprintf("%s: read control after reset: %x\n",
	    __func__, READ4(sc, DMA_CONTROL));

	return (0);
}

static int
msgdma_probe(device_t dev)
{
	int hwtype;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (hwtype == HWTYPE_NONE)
		return (ENXIO);

	device_set_desc(dev, "Altera mSGDMA");

	return (BUS_PROBE_DEFAULT);
}

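/*
 * Attach: map the two register windows (CSR at index 0, descriptor
 * prefetcher at index 1), hook up the interrupt handler, register the
 * node's OF xref so xdma consumers can look the controller up, reset
 * the engine and enable the global interrupt mask.
 */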
static int
msgdma_attach(device_t dev)
{
	struct msgdma_softc *sc;
	phandle_t xref, node;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (bus_alloc_resources(dev, msgdma_spec, sc->res)) {
		device_printf(dev, "could not allocate resources for device\n");
		return (ENXIO);
	}

	/* CSR memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	/* Descriptor memory interface */
	sc->bst_d = rman_get_bustag(sc->res[1]);
	sc->bsh_d = rman_get_bushandle(sc->res[1]);

	/* Setup interrupt handler */
	err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, msgdma_intr, sc, &sc->ih);
	if (err) {
		device_printf(dev, "Unable to setup interrupt handler.\n");
		bus_release_resources(dev, msgdma_spec, sc->res);
		return (ENXIO);
	}

	node = ofw_bus_get_node(dev);
	xref = OF_xref_from_node(node);
	OF_device_register_xref(xref, dev);

	if (msgdma_reset(sc) != 0) {
		bus_teardown_intr(dev, sc->res[2], sc->ih);
		bus_release_resources(dev, msgdma_spec, sc->res);
		return (ENXIO);
	}

	WRITE4(sc, DMA_CONTROL, CONTROL_GIEM);

	return (0);
}

static int
msgdma_detach(device_t dev)
{
	struct msgdma_softc *sc;

	sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->res[2], sc->ih);
	bus_release_resources(dev, msgdma_spec, sc->res);

	return (0);
}

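/*
 * bus_dmamap_load() callback: record the physical address of a single
 * descriptor in descs_phys[].  map_descr tells us which descriptor is
 * currently being loaded; map_err is raised so the caller can notice a
 * failed load.
 */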
static void
msgdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	struct msgdma_channel *chan;

	chan = (struct msgdma_channel *)arg;
	KASSERT(chan != NULL, ("chan is NULL"));

	if (err) {
		chan->map_err = 1;
		return;
	}

	chan->descs_phys[chan->map_descr].ds_addr = segs[0].ds_addr;
	chan->descs_phys[chan->map_descr].ds_len = segs[0].ds_len;

	dprintf("map desc %d: descs phys %lx len %ld\n",
	    chan->map_descr, segs[0].ds_addr, segs[0].ds_len);
}

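/*
 * Release everything msgdma_desc_alloc() set up: unload and free each
 * descriptor, destroy the DMA tag and free the bookkeeping arrays.
 */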
static int
msgdma_desc_free(struct msgdma_softc *sc, struct msgdma_channel *chan)
{
	struct msgdma_desc *desc;
	int nsegments;
	int i;

	nsegments = chan->descs_num;

	for (i = 0; i < nsegments; i++) {
		desc = chan->descs[i];
		bus_dmamap_unload(chan->dma_tag, chan->dma_map[i]);
		bus_dmamem_free(chan->dma_tag, desc, chan->dma_map[i]);
	}

	bus_dma_tag_destroy(chan->dma_tag);
	free(chan->descs, M_DEVBUF);
	free(chan->dma_map, M_DEVBUF);
	free(chan->descs_phys, M_DEVBUF);

	return (0);
}

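/*
 * Allocate descs_num hardware descriptors out of DMA-able memory, one
 * bus_dma map per descriptor, and record each descriptor's physical
 * address via msgdma_dmamap_cb().  desc_size and align come from the
 * caller (msgdma_channel_prep_sg()).
 */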
static int
msgdma_desc_alloc(struct msgdma_softc *sc, struct msgdma_channel *chan,
    uint32_t desc_size, uint32_t align)
{
	int nsegments;
	int err;
	int i;

	nsegments = chan->descs_num;

	dprintf("%s: nseg %d\n", __func__, nsegments);

	err = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),
	    align, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    desc_size, 1,		/* maxsize, nsegments */
	    desc_size, 0,		/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &chan->dma_tag);
	if (err) {
		device_printf(sc->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	/* Descriptors. */
	chan->descs = malloc(nsegments * sizeof(struct msgdma_desc *),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	if (chan->descs == NULL) {
		device_printf(sc->dev,
		    "%s: Can't allocate memory.\n", __func__);
		return (-1);
	}
	chan->dma_map = malloc(nsegments * sizeof(bus_dmamap_t),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
	    M_DEVBUF, (M_WAITOK | M_ZERO));

	/* Allocate bus_dma memory for each descriptor. */
	for (i = 0; i < nsegments; i++) {
		err = bus_dmamem_alloc(chan->dma_tag, (void **)&chan->descs[i],
		    BUS_DMA_WAITOK | BUS_DMA_ZERO, &chan->dma_map[i]);
		if (err) {
			device_printf(sc->dev,
			    "%s: Can't allocate memory for descriptors.\n",
			    __func__);
			return (-1);
		}

		chan->map_err = 0;
		chan->map_descr = i;
		err = bus_dmamap_load(chan->dma_tag, chan->dma_map[i],
		    chan->descs[i], desc_size, msgdma_dmamap_cb, chan,
		    BUS_DMA_WAITOK);
		if (err) {
			device_printf(sc->dev,
			    "%s: Can't load DMA map.\n", __func__);
			return (-1);
		}

		if (chan->map_err != 0) {
			device_printf(sc->dev,
			    "%s: Can't load DMA map.\n", __func__);
			return (-1);
		}
	}

	return (0);
}

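/*
 * xdma interface: hand out the first unused channel.  The channel is
 * advertised as BUSDMA-capable unless the consumer already sits behind
 * an IOMMU.  The descriptor ring is sized here but only allocated once
 * the consumer calls the prep_sg method.
 */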
static int
msgdma_channel_alloc(device_t dev, struct xdma_channel *xchan)
{
	struct msgdma_channel *chan;
	struct msgdma_softc *sc;
	int i;

	sc = device_get_softc(dev);

	for (i = 0; i < MSGDMA_NCHANNELS; i++) {
		chan = &sc->channels[i];
		if (chan->used == 0) {
			chan->xchan = xchan;
			xchan->chan = (void *)chan;
			if ((xchan->caps & XCHAN_CAP_IOMMU) == 0)
				xchan->caps |= XCHAN_CAP_BUSDMA;
			chan->index = i;
			chan->sc = sc;
			chan->used = 1;
			chan->idx_head = 0;
			chan->idx_tail = 0;
			chan->descs_used_count = 0;
			chan->descs_num = 1024;

			return (0);
		}
	}

	return (-1);
}

static int
msgdma_channel_free(device_t dev, struct xdma_channel *xchan)
{
	struct msgdma_channel *chan;
	struct msgdma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct msgdma_channel *)xchan->chan;

	msgdma_desc_free(sc, chan);

	chan->used = 0;

	return (0);
}

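/*
 * Report how many more segments can be queued right now.  One slot is
 * always kept free so that idx_head == idx_tail unambiguously means
 * "ring empty" rather than "ring full".
 */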
static int
msgdma_channel_capacity(device_t dev, xdma_channel_t *xchan,
    uint32_t *capacity)
{
	struct msgdma_channel *chan;
	uint32_t c;

	chan = (struct msgdma_channel *)xchan->chan;

	/* At least one descriptor must be left empty. */
	c = (chan->descs_num - chan->descs_used_count - 1);

	*capacity = c;

	return (0);
}

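/*
 * Queue a scatter-gather list on the ring.  Each segment is written
 * into the descriptor at idx_head: addresses and length first, then
 * the control bits (CONTROL_GEN_SOP/CONTROL_GEN_EOP for XDMA_MEM_TO_DEV
 * transfers, CONTROL_END_ON_EOP otherwise, plus the IRQ enables), and
 * only then OWN | GO, so the prefetcher never picks up a half-written
 * descriptor.  Finally the descriptor is synced for the device.
 */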
static int
msgdma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
    struct xdma_sglist *sg, uint32_t sg_n)
{
	struct msgdma_channel *chan;
	struct msgdma_desc *desc;
	struct msgdma_softc *sc;
	bus_addr_t src_addr_lo;
	bus_addr_t dst_addr_lo;
	uint32_t len;
	uint32_t tmp;
	int i;

	sc = device_get_softc(dev);

	chan = (struct msgdma_channel *)xchan->chan;

	for (i = 0; i < sg_n; i++) {
		src_addr_lo = sg[i].src_addr;
		dst_addr_lo = sg[i].dst_addr;
		len = (uint32_t)sg[i].len;

		dprintf("%s: src %x dst %x len %d\n", __func__,
		    src_addr_lo, dst_addr_lo, len);

		desc = chan->descs[chan->idx_head];
#if defined(ALTERA_MSGDMA_DESC_EXT) || defined(ALTERA_MSGDMA_DESC_PF_EXT)
		desc->read_hi = htole32(src_addr_lo >> 32);
		desc->write_hi = htole32(dst_addr_lo >> 32);
#endif
		desc->read_lo = htole32(src_addr_lo);
		desc->write_lo = htole32(dst_addr_lo);
		desc->length = htole32(len);
		desc->transferred = 0;
		desc->status = 0;
		desc->reserved = 0;
		desc->control = 0;

		if (sg[i].direction == XDMA_MEM_TO_DEV) {
			if (sg[i].first == 1) {
				desc->control |= htole32(CONTROL_GEN_SOP);
			}

			if (sg[i].last == 1) {
				desc->control |= htole32(CONTROL_GEN_EOP);
				desc->control |= htole32(CONTROL_TC_IRQ_EN |
				    CONTROL_ET_IRQ_EN | CONTROL_ERR_M);
			}
		} else {
			desc->control |= htole32(CONTROL_END_ON_EOP | (1 << 13));
			desc->control |= htole32(CONTROL_TC_IRQ_EN |
			    CONTROL_ET_IRQ_EN | CONTROL_ERR_M);
		}

		tmp = chan->idx_head;

		atomic_add_int(&chan->descs_used_count, 1);
		chan->idx_head = msgdma_next_desc(chan, chan->idx_head);

		desc->control |= htole32(CONTROL_OWN | CONTROL_GO);

		bus_dmamap_sync(chan->dma_tag, chan->dma_map[tmp],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	return (0);
}

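/*
 * Prepare the channel for scatter-gather operation: allocate the
 * descriptor ring, link every descriptor's "next" pointer to its
 * neighbour (the last one wraps back to the first), point the
 * prefetcher at the head of the ring and start it with descriptor
 * polling enabled.
 */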
static int
msgdma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
{
	struct msgdma_channel *chan;
	struct msgdma_desc *desc;
	struct msgdma_softc *sc;
	uint32_t addr;
	uint32_t reg;
	int ret;
	int i;

	sc = device_get_softc(dev);

	dprintf("%s(%d)\n", __func__, device_get_unit(dev));

	chan = (struct msgdma_channel *)xchan->chan;

	ret = msgdma_desc_alloc(sc, chan, sizeof(struct msgdma_desc), 16);
	if (ret != 0) {
		device_printf(sc->dev,
		    "%s: Can't allocate descriptors.\n", __func__);
		return (-1);
	}

	for (i = 0; i < chan->descs_num; i++) {
		desc = chan->descs[i];

		if (i == (chan->descs_num - 1)) {
			desc->next = htole32(chan->descs_phys[0].ds_addr);
		} else {
			desc->next = htole32(chan->descs_phys[i + 1].ds_addr);
		}

		dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
		    device_get_unit(dev), i, (uint64_t)desc, le32toh(desc->next));
	}

	addr = chan->descs_phys[0].ds_addr;
	WRITE4_DESC(sc, PF_NEXT_LO, addr);
	WRITE4_DESC(sc, PF_NEXT_HI, 0);
	WRITE4_DESC(sc, PF_POLL_FREQ, 1000);

	reg = (PF_CONTROL_GIEM | PF_CONTROL_DESC_POLL_EN);
	reg |= PF_CONTROL_RUN;
	WRITE4_DESC(sc, PF_CONTROL, reg);

	return (0);
}

static int
msgdma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
{
	struct msgdma_channel *chan;
	struct msgdma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct msgdma_channel *)xchan->chan;

	switch (cmd) {
	case XDMA_CMD_BEGIN:
	case XDMA_CMD_TERMINATE:
	case XDMA_CMD_PAUSE:
		/* TODO: implement me */
		return (-1);
	}

	return (0);
}

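/*
 * No driver-specific metadata is carried in the consumer's FDT cells,
 * so this xdma OFW hook has nothing to decode; consumers find the
 * controller through the xref registered in msgdma_attach().
 */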
#ifdef FDT
static int
msgdma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
{

	return (0);
}
#endif

static device_method_t msgdma_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			msgdma_probe),
	DEVMETHOD(device_attach,		msgdma_attach),
	DEVMETHOD(device_detach,		msgdma_detach),

	/* xDMA Interface */
	DEVMETHOD(xdma_channel_alloc,		msgdma_channel_alloc),
	DEVMETHOD(xdma_channel_free,		msgdma_channel_free),
	DEVMETHOD(xdma_channel_control,		msgdma_channel_control),

	/* xDMA SG Interface */
	DEVMETHOD(xdma_channel_capacity,	msgdma_channel_capacity),
	DEVMETHOD(xdma_channel_prep_sg,		msgdma_channel_prep_sg),
	DEVMETHOD(xdma_channel_submit_sg,	msgdma_channel_submit_sg),

#ifdef FDT
	DEVMETHOD(xdma_ofw_md_data,		msgdma_ofw_md_data),
#endif

	DEVMETHOD_END
};

static driver_t msgdma_driver = {
	"msgdma",
	msgdma_methods,
	sizeof(struct msgdma_softc),
};

EARLY_DRIVER_MODULE(msgdma, simplebus, msgdma_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
643