xref: /freebsd/sys/arm/freescale/imx/imx6_sdma.c (revision dd41de95a84d979615a2ef11df6850622bf6184e)
1 /*-
2  * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * i.MX6 Smart Direct Memory Access Controller (sDMA)
29  * Chapter 41, i.MX 6Dual/6Quad Applications Processor Reference Manual,
30  * Rev. 1, 04/2013
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/kernel.h>
40 #include <sys/module.h>
41 #include <sys/malloc.h>
42 #include <sys/endian.h>
43 #include <sys/rman.h>
44 #include <sys/timeet.h>
45 #include <sys/timetc.h>
46 #include <sys/firmware.h>
47 
48 #include <vm/vm.h>
49 #include <vm/vm_extern.h>
50 #include <vm/vm_kern.h>
51 #include <vm/pmap.h>
52 
53 #include <dev/ofw/openfirm.h>
54 #include <dev/ofw/ofw_bus.h>
55 #include <dev/ofw/ofw_bus_subr.h>
56 
57 #include <machine/bus.h>
58 #include <machine/cpu.h>
59 #include <machine/intr.h>
60 
61 #include <arm/freescale/imx/imx6_sdma.h>
62 
63 #define	MAX_BD	(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
64 
65 #define	READ4(_sc, _reg)	\
66 	bus_space_read_4(_sc->bst, _sc->bsh, _reg)
67 #define	WRITE4(_sc, _reg, _val)	\
68 	bus_space_write_4(_sc->bst, _sc->bsh, _reg, _val)
69 
70 struct sdma_softc *sdma_sc;
71 
72 static struct resource_spec sdma_spec[] = {
73 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
74 	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
75 	{ -1, 0 }
76 };
77 
78 /*
79  * This will get set to true if we can't load firmware while attaching, to
80  * prevent multiple attempts to re-attach the device on each bus pass.
81  */
82 static bool firmware_unavailable;
83 
84 static void
85 sdma_intr(void *arg)
86 {
87 	struct sdma_buffer_descriptor *bd;
88 	struct sdma_channel *channel;
89 	struct sdma_conf *conf;
90 	struct sdma_softc *sc;
91 	int pending;
92 	int i;
93 	int j;
94 
95 	sc = arg;
96 
97 	pending = READ4(sc, SDMAARM_INTR);
98 
99 	/* Ack intr */
100 	WRITE4(sc, SDMAARM_INTR, pending);
101 
102 	for (i = 0; i < SDMA_N_CHANNELS; i++) {
103 		if ((pending & (1 << i)) == 0)
104 			continue;
105 		channel = &sc->channel[i];
106 		conf = channel->conf;
107 		if (!conf)
108 			continue;
109 		for (j = 0; j < conf->num_bd; j++) {
110 			bd = &channel->bd[j];
111 			bd->mode.status |= BD_DONE;
112 			if (bd->mode.status & BD_RROR)
113 				printf("sDMA error\n");
114 		}
115 
116 		conf->ih(conf->ih_user, 1);
117 
118 		WRITE4(sc, SDMAARM_HSTART, (1 << i));
119 	}
120 }
121 
122 static int
123 sdma_probe(device_t dev)
124 {
125 
126 	if (!ofw_bus_status_okay(dev) || firmware_unavailable)
127 		return (ENXIO);
128 
129 	if (!ofw_bus_is_compatible(dev, "fsl,imx6q-sdma"))
130 		return (ENXIO);
131 
132 	device_set_desc(dev, "i.MX6 Smart Direct Memory Access Controller");
133 	return (BUS_PROBE_DEFAULT);
134 }
135 
136 int
137 sdma_start(int chn)
138 {
139 	struct sdma_softc *sc;
140 
141 	sc = sdma_sc;
142 
143 	WRITE4(sc, SDMAARM_HSTART, (1 << chn));
144 
145 	return (0);
146 }
147 
148 int
149 sdma_stop(int chn)
150 {
151 	struct sdma_softc *sc;
152 
153 	sc = sdma_sc;
154 
155 	WRITE4(sc, SDMAARM_STOP_STAT, (1 << chn));
156 
157 	return (0);
158 }
159 
160 int
161 sdma_alloc(void)
162 {
163 	struct sdma_channel *channel;
164 	struct sdma_softc *sc;
165 	int found;
166 	int chn;
167 	int i;
168 
169 	sc = sdma_sc;
170 	found = 0;
171 
172 	/* Channel 0 can't be used */
173 	for (i = 1; i < SDMA_N_CHANNELS; i++) {
174 		channel = &sc->channel[i];
175 		if (channel->in_use == 0) {
176 			channel->in_use = 1;
177 			found = 1;
178 			break;
179 		}
180 	}
181 
182 	if (!found)
183 		return (-1);
184 
185 	chn = i;
186 
187 	/* Allocate area for buffer descriptors */
188 	channel->bd = (void *)kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0,
189 	    PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
190 
191 	return (chn);
192 }
193 
194 int
195 sdma_free(int chn)
196 {
197 	struct sdma_channel *channel;
198 	struct sdma_softc *sc;
199 
200 	sc = sdma_sc;
201 
202 	channel = &sc->channel[chn];
203 	channel->in_use = 0;
204 
205 	kmem_free((vm_offset_t)channel->bd, PAGE_SIZE);
206 
207 	return (0);
208 }
209 
210 static int
211 sdma_overrides(struct sdma_softc *sc, int chn,
212 		int evt, int host, int dsp)
213 {
214 	int reg;
215 
216 	/* Ignore sDMA requests */
217 	reg = READ4(sc, SDMAARM_EVTOVR);
218 	if (evt)
219 		reg |= (1 << chn);
220 	else
221 		reg &= ~(1 << chn);
222 	WRITE4(sc, SDMAARM_EVTOVR, reg);
223 
224 	/* Ignore enable bit (HE) */
225 	reg = READ4(sc, SDMAARM_HOSTOVR);
226 	if (host)
227 		reg |= (1 << chn);
228 	else
229 		reg &= ~(1 << chn);
230 	WRITE4(sc, SDMAARM_HOSTOVR, reg);
231 
232 	/* Prevent sDMA channel from starting */
233 	reg = READ4(sc, SDMAARM_DSPOVR);
234 	if (!dsp)
235 		reg |= (1 << chn);
236 	else
237 		reg &= ~(1 << chn);
238 	WRITE4(sc, SDMAARM_DSPOVR, reg);
239 
240 	return (0);
241 }
242 
243 int
244 sdma_configure(int chn, struct sdma_conf *conf)
245 {
246 	struct sdma_buffer_descriptor *bd0;
247 	struct sdma_buffer_descriptor *bd;
248 	struct sdma_context_data *context;
249 	struct sdma_channel *channel;
250 	struct sdma_softc *sc;
251 #if 0
252 	int timeout;
253 	int ret;
254 #endif
255 	int i;
256 
257 	sc = sdma_sc;
258 
259 	channel = &sc->channel[chn];
260 	channel->conf = conf;
261 
262 	/* Ensure operation has stopped */
263 	sdma_stop(chn);
264 
265 	/* Set priority and enable the channel */
266 	WRITE4(sc, SDMAARM_SDMA_CHNPRI(chn), 1);
267 	WRITE4(sc, SDMAARM_CHNENBL(conf->event), (1 << chn));
268 
269 	sdma_overrides(sc, chn, 0, 0, 0);
270 
271 	if (conf->num_bd > MAX_BD) {
272 		device_printf(sc->dev, "Error: too much buffer"
273 				" descriptors requested\n");
274 		return (-1);
275 	}
276 
277 	for (i = 0; i < conf->num_bd; i++) {
278 		bd = &channel->bd[i];
279 		bd->mode.command = conf->command;
280 		bd->mode.status = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
281 		if (i == (conf->num_bd - 1))
282 			bd->mode.status |= BD_WRAP;
283 		bd->mode.count = conf->period;
284 		bd->buffer_addr = conf->saddr + (conf->period * i);
285 		bd->ext_buffer_addr = 0;
286 	}
287 
288 	sc->ccb[chn].base_bd_ptr = vtophys(channel->bd);
289 	sc->ccb[chn].current_bd_ptr = vtophys(channel->bd);
290 
291 	/*
292 	 * Load context.
293 	 *
294 	 * i.MX6 Reference Manual: Appendix A SDMA Scripts
295 	 * A.3.1.7.1 (mcu_2_app)
296 	 */
297 
298 	/*
299 	 * TODO: allow using other scripts
300 	 */
301 	context = sc->context;
302 	memset(context, 0, sizeof(*context));
303 	context->channel_state.pc = sc->fw_scripts->mcu_2_app_addr;
304 
305 	/*
306 	 * Tx FIFO 0 address (r6)
307 	 * Event_mask (r1)
308 	 * Event2_mask (r0)
309 	 * Watermark level (r7)
310 	 */
311 
312 	if (conf->event > 32) {
313 		context->gReg[0] = (1 << (conf->event % 32));
314 		context->gReg[1] = 0;
315 	} else {
316 		context->gReg[0] = 0;
317 		context->gReg[1] = (1 << conf->event);
318 	}
319 
320 	context->gReg[6] = conf->daddr;
321 	context->gReg[7] = conf->word_length;
322 
323 	bd0 = sc->bd0;
324 	bd0->mode.command = C0_SETDM;
325 	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
326 	bd0->mode.count = sizeof(*context) / 4;
327 	bd0->buffer_addr = sc->context_phys;
328 	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * chn;
329 
330 	WRITE4(sc, SDMAARM_HSTART, 1);
331 
332 #if 0
333 	/* Debug purposes */
334 
335 	timeout = 1000;
336 	while (!(ret = READ4(sc, SDMAARM_INTR) & 1)) {
337 		if (timeout-- <= 0)
338 			break;
339 		DELAY(10);
340 	};
341 
342 	if (!ret) {
343 		device_printf(sc->dev, "Failed to load context.\n");
344 		return (-1);
345 	}
346 
347 	WRITE4(sc, SDMAARM_INTR, ret);
348 
349 	device_printf(sc->dev, "Context loaded successfully.\n");
350 #endif
351 
352 	return (0);
353 }
354 
355 static int
356 load_firmware(struct sdma_softc *sc)
357 {
358 	const struct sdma_firmware_header *header;
359 	const struct firmware *fp;
360 
361 	fp = firmware_get("sdma-imx6q");
362 	if (fp == NULL) {
363 		device_printf(sc->dev, "Can't get firmware.\n");
364 		return (-1);
365 	}
366 
367 	header = fp->data;
368 	if (header->magic != FW_HEADER_MAGIC) {
369 		device_printf(sc->dev, "Can't use firmware.\n");
370 		return (-1);
371 	}
372 
373 	sc->fw_header = header;
374 	sc->fw_scripts = (const void *)((const char *)header +
375 				header->script_addrs_start);
376 
377 	return (0);
378 }
379 
380 static int
381 boot_firmware(struct sdma_softc *sc)
382 {
383 	struct sdma_buffer_descriptor *bd0;
384 	const uint32_t *ram_code;
385 	int timeout;
386 	int ret;
387 	int chn;
388 	int sz;
389 	int i;
390 
391 	ram_code = (const void *)((const char *)sc->fw_header +
392 			sc->fw_header->ram_code_start);
393 
394 	/* Make sure SDMA has not started yet */
395 	WRITE4(sc, SDMAARM_MC0PTR, 0);
396 
397 	sz = SDMA_N_CHANNELS * sizeof(struct sdma_channel_control) + \
398 	    sizeof(struct sdma_context_data);
399 	sc->ccb = (void *)kmem_alloc_contig(sz, M_ZERO, 0, ~0, PAGE_SIZE, 0,
400 	    VM_MEMATTR_UNCACHEABLE);
401 	sc->ccb_phys = vtophys(sc->ccb);
402 
403 	sc->context = (void *)((char *)sc->ccb + \
404 	    SDMA_N_CHANNELS * sizeof(struct sdma_channel_control));
405 	sc->context_phys = vtophys(sc->context);
406 
407 	/* Disable all the channels */
408 	for (i = 0; i < SDMA_N_EVENTS; i++)
409 		WRITE4(sc, SDMAARM_CHNENBL(i), 0);
410 
411 	/* All channels have priority 0 */
412 	for (i = 0; i < SDMA_N_CHANNELS; i++)
413 		WRITE4(sc, SDMAARM_SDMA_CHNPRI(i), 0);
414 
415 	/* Channel 0 is used for booting firmware */
416 	chn = 0;
417 
418 	sc->bd0 = (void *)kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0, PAGE_SIZE,
419 	    0, VM_MEMATTR_UNCACHEABLE);
420 	bd0 = sc->bd0;
421 	sc->ccb[chn].base_bd_ptr = vtophys(bd0);
422 	sc->ccb[chn].current_bd_ptr = vtophys(bd0);
423 
424 	WRITE4(sc, SDMAARM_SDMA_CHNPRI(chn), 1);
425 
426 	sdma_overrides(sc, chn, 1, 0, 0);
427 
428 	/* XXX: not sure what is that */
429 	WRITE4(sc, SDMAARM_CHN0ADDR, 0x4050);
430 
431 	WRITE4(sc, SDMAARM_CONFIG, 0);
432 	WRITE4(sc, SDMAARM_MC0PTR, sc->ccb_phys);
433 	WRITE4(sc, SDMAARM_CONFIG, CONFIG_CSM);
434 	WRITE4(sc, SDMAARM_SDMA_CHNPRI(chn), 1);
435 
436 	bd0->mode.command = C0_SETPM;
437 	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
438 	bd0->mode.count = sc->fw_header->ram_code_size / 2;
439 	bd0->buffer_addr = vtophys(ram_code);
440 	bd0->ext_buffer_addr = sc->fw_scripts->ram_code_start_addr;
441 
442 	WRITE4(sc, SDMAARM_HSTART, 1);
443 
444 	timeout = 100;
445 	while (!(ret = READ4(sc, SDMAARM_INTR) & 1)) {
446 		if (timeout-- <= 0)
447 			break;
448 		DELAY(10);
449 	}
450 
451 	if (ret == 0) {
452 		device_printf(sc->dev, "SDMA failed to boot\n");
453 		return (-1);
454 	}
455 
456 	WRITE4(sc, SDMAARM_INTR, ret);
457 
458 #if 0
459 	device_printf(sc->dev, "SDMA booted successfully.\n");
460 #endif
461 
462 	/* Debug is disabled */
463 	WRITE4(sc, SDMAARM_ONCE_ENB, 0);
464 
465 	return (0);
466 }
467 
468 static int
469 sdma_attach(device_t dev)
470 {
471 	struct sdma_softc *sc;
472 	int err;
473 
474 	sc = device_get_softc(dev);
475 	sc->dev = dev;
476 
477 	if (load_firmware(sc) == -1) {
478 		firmware_unavailable = true;
479 		return (ENXIO);
480 	}
481 
482 	if (bus_alloc_resources(dev, sdma_spec, sc->res)) {
483 		device_printf(dev, "could not allocate resources\n");
484 		return (ENXIO);
485 	}
486 
487 	/* Memory interface */
488 	sc->bst = rman_get_bustag(sc->res[0]);
489 	sc->bsh = rman_get_bushandle(sc->res[0]);
490 
491 	sdma_sc = sc;
492 
493 	/* Setup interrupt handler */
494 	err = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
495 	    NULL, sdma_intr, sc, &sc->ih);
496 	if (err) {
497 		device_printf(dev, "Unable to alloc interrupt resource.\n");
498 		return (ENXIO);
499 	}
500 
501 	if (boot_firmware(sc) == -1)
502 		return (ENXIO);
503 
504 	return (0);
505 };
506 
507 static device_method_t sdma_methods[] = {
508 	/* Device interface */
509 	DEVMETHOD(device_probe,		sdma_probe),
510 	DEVMETHOD(device_attach,	sdma_attach),
511 	{ 0, 0 }
512 };
513 
514 static driver_t sdma_driver = {
515 	"sdma",
516 	sdma_methods,
517 	sizeof(struct sdma_softc),
518 };
519 
520 static devclass_t sdma_devclass;
521 
522 /* We want to attach after all interrupt controllers, before anything else. */
523 EARLY_DRIVER_MODULE(sdma, simplebus, sdma_driver, sdma_devclass, 0, 0,
524     BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LAST);
525