/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011
 *	Ben Gray <ben.r.gray@gmail.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/interrupt.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/timetc.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <arm/ti/ti_cpuid.h>
#include <arm/ti/ti_prcm.h>
#include <arm/ti/ti_sdma.h>
#include <arm/ti/ti_sdmareg.h>

/**
 *	Kernel functions for using the DMA controller
 *
 *	DMA TRANSFERS:
 *	A DMA transfer block consists of a number of frames (FN). Each frame
 *	consists of a number of elements, and each element can have a size of 8, 16,
 *	or 32 bits.
 *
 *	OMAP44xx and newer chips support linked list (aka scatter gather) transfers,
 *	where a linked list of source/destination pairs can be placed in memory
 *	for the H/W to process.  Earlier chips only allowed you to chain multiple
 *	channels together.  However, the linked list feature is currently not
 *	supported by this driver.
 *
 */
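
/*
 * Illustrative usage (a hedged sketch, not part of the original file): a
 * simple memory-to-memory copy of 512 32-bit elements could be set up roughly
 * as follows, using only functions and constants documented in this file.
 * The src_paddr/dst_paddr buffer addresses are hypothetical.
 *
 *	unsigned int ch;
 *
 *	if (ti_sdma_activate_channel(&ch, NULL, NULL) != 0)
 *		return;
 *	ti_sdma_set_xfer_data_type(ch, DMA_DATA_32BITS_SCALAR);
 *	ti_sdma_set_addr_mode(ch, DMA_ADDR_POST_INCREMENT,
 *	    DMA_ADDR_POST_INCREMENT);
 *	ti_sdma_start_xfer(ch, src_paddr, dst_paddr, 1, 512);
 *	...
 *	ti_sdma_stop_xfer(ch);
 *	ti_sdma_deactivate_channel(ch);
 */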

/**
 *	Data structure per DMA channel.
 */
struct ti_sdma_channel {
	/*
	 * The configuration registers for the given channel, these are modified
	 * by the set functions and only written to the actual registers when a
	 * transaction is started.
	 */
	uint32_t		reg_csdp;
	uint32_t		reg_ccr;
	uint32_t		reg_cicr;

	/* Set when one of the configuration registers above changes */
	uint32_t		need_reg_write;

	/* Callback function used when an interrupt is tripped on the given channel */
	void (*callback)(unsigned int ch, uint32_t ch_status, void *data);

	/* Data passed to the callback when it is invoked */
	void*			callback_data;
};

/**
 *	DMA driver context, allocated and stored globally, this driver is not
 *	intended to ever be unloaded (see ti_sdma_sc).
 */
struct ti_sdma_softc {
	device_t		sc_dev;
	struct resource*	sc_irq_res;
	struct resource*	sc_mem_res;

	/*
	 * In theory we should have a mutex per DMA channel for register
	 * modifications, but since we know we are never going to be run on an SMP
	 * system, we can use just the single lock for all channels.
	 */
	struct mtx		sc_mtx;

	/* Stores the H/W revision read from the registers */
	uint32_t		sc_hw_rev;

	/*
	 * Bits in the sc_active_channels data field indicate if the channel has
	 * been activated.
	 */
	uint32_t		sc_active_channels;

	struct ti_sdma_channel sc_channel[NUM_DMA_CHANNELS];
};

static struct ti_sdma_softc *ti_sdma_sc = NULL;

/**
 *	Macros for driver mutex locking
 */
#define TI_SDMA_LOCK(_sc)             mtx_lock_spin(&(_sc)->sc_mtx)
#define TI_SDMA_UNLOCK(_sc)           mtx_unlock_spin(&(_sc)->sc_mtx)
#define TI_SDMA_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	         "ti_sdma", MTX_SPIN)
#define TI_SDMA_LOCK_DESTROY(_sc)     mtx_destroy(&_sc->sc_mtx);
#define TI_SDMA_ASSERT_LOCKED(_sc)    mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define TI_SDMA_ASSERT_UNLOCKED(_sc)  mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

/**
 *	Function prototypes
 */
static void ti_sdma_intr(void *);

/**
 *	ti_sdma_read_4 - reads a 32-bit value from one of the DMA registers
 *	@sc: DMA device context
 *	@off: The offset of a register from the DMA register address range
 *
 *	RETURNS:
 *	32-bit value read from the register.
 */
static inline uint32_t
ti_sdma_read_4(struct ti_sdma_softc *sc, bus_size_t off)
{
	return bus_read_4(sc->sc_mem_res, off);
}

/**
 *	ti_sdma_write_4 - writes a 32-bit value to one of the DMA registers
 *	@sc: DMA device context
 *	@off: The offset of a register from the DMA register address range
 *	@val: The value to write into the register
 *
 *	RETURNS:
 *	nothing
 */
static inline void
ti_sdma_write_4(struct ti_sdma_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->sc_mem_res, off, val);
}

/**
 *	ti_sdma_is_omap3_rev - returns true if H/W is from OMAP3 series
 *	@sc: DMA device context
 *
 */
static inline int
ti_sdma_is_omap3_rev(struct ti_sdma_softc *sc)
{
	return (sc->sc_hw_rev == DMA4_OMAP3_REV);
}

/**
 *	ti_sdma_is_omap4_rev - returns true if H/W is from OMAP4 series
 *	@sc: DMA device context
 *
 */
static inline int
ti_sdma_is_omap4_rev(struct ti_sdma_softc *sc)
{
	return (sc->sc_hw_rev == DMA4_OMAP4_REV);
}

/**
 *	ti_sdma_intr - interrupt handler for all 4 DMA IRQs
 *	@arg: ignored
 *
 *	Called when any of the four DMA IRQs are triggered.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	nothing
 */
static void
ti_sdma_intr(void *arg)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t intr;
	uint32_t csr;
	unsigned int ch, j;
	struct ti_sdma_channel* channel;

	TI_SDMA_LOCK(sc);

	for (j = 0; j < NUM_DMA_IRQS; j++) {
		/* Get the flagged interrupts (enabled) */
		intr = ti_sdma_read_4(sc, DMA4_IRQSTATUS_L(j));
		intr &= ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
		if (intr == 0x00000000)
			continue;

		/* Loop through checking the status bits */
		for (ch = 0; ch < NUM_DMA_CHANNELS; ch++) {
			if (intr & (1 << ch)) {
				channel = &sc->sc_channel[ch];

				/* Read the CSR register and verify we don't have a spurious IRQ */
				csr = ti_sdma_read_4(sc, DMA4_CSR(ch));
				if (csr == 0) {
					device_printf(sc->sc_dev, "Spurious DMA IRQ for channel "
					              "%d\n", ch);
					continue;
				}

				/* Sanity check this channel is active */
				if ((sc->sc_active_channels & (1 << ch)) == 0) {
					device_printf(sc->sc_dev, "IRQ %d for a non-activated "
					              "channel %d\n", j, ch);
					continue;
				}

				/* Check the status error codes */
				if (csr & DMA4_CSR_DROP)
					device_printf(sc->sc_dev, "Synchronization event drop "
					              "occurred during the transfer on channel %u\n",
					              ch);
				if (csr & DMA4_CSR_SECURE_ERR)
					device_printf(sc->sc_dev, "Secure transaction error event "
					              "on channel %u\n", ch);
				if (csr & DMA4_CSR_MISALIGNED_ADRS_ERR)
					device_printf(sc->sc_dev, "Misaligned address error event "
					              "on channel %u\n", ch);
				if (csr & DMA4_CSR_TRANS_ERR) {
					device_printf(sc->sc_dev, "Transaction error event on "
					              "channel %u\n", ch);
					/*
					 * Apparently, according to the Linux code, there is an
					 * erratum that says the channel is not disabled upon this
					 * error.  Linux explicitly disables the channel here; since
					 * I haven't seen the erratum, I'm ignoring that for now.
					 */
				}

				/* Clear the status flags for the IRQ */
				ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
				ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));

				/* Call the callback for the given channel */
				if (channel->callback)
					channel->callback(ch, csr, channel->callback_data);
			}
		}
	}

	TI_SDMA_UNLOCK(sc);

	return;
}

/**
 *	ti_sdma_activate_channel - activates a DMA channel
 *	@ch: upon return contains the channel allocated
 *	@callback: a callback function to associate with the channel
 *	@data: optional data supplied when the callback is called
 *
 *	Simply activates a channel by enabling and writing default values to the
 *	channel's register set.  It doesn't start a transaction, just populates the
 *	internal data structures and sets defaults.
 *
 *	Note this function doesn't enable interrupts, for that you need to call
 *	ti_sdma_enable_channel_irq(). If not using IRQ to detect the end of the
 *	transfer, you can use ti_sdma_status_poll() to detect a change in the
 *	status.
 *
 *	A channel must be activated before any of the other DMA functions can be
 *	called on it.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_activate_channel(unsigned int *ch,
                          void (*callback)(unsigned int ch, uint32_t status, void *data),
                          void *data)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	struct ti_sdma_channel *channel = NULL;
	uint32_t addr;
	unsigned int i;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	if (ch == NULL)
		return (EINVAL);

	TI_SDMA_LOCK(sc);

	/* Check to see if all channels are in use */
	if (sc->sc_active_channels == 0xffffffff) {
		TI_SDMA_UNLOCK(sc);
		return (ENOMEM);
	}

	/* Find the first non-active channel */
	for (i = 0; i < NUM_DMA_CHANNELS; i++) {
		if (!(sc->sc_active_channels & (0x1 << i))) {
			sc->sc_active_channels |= (0x1 << i);
			*ch = i;
			break;
		}
	}

	/* Get the channel struct and populate the fields */
	channel = &sc->sc_channel[*ch];

	channel->callback = callback;
	channel->callback_data = data;

	channel->need_reg_write = 1;

	/* Set the default configuration for the DMA channel */
	channel->reg_csdp = DMA4_CSDP_DATA_TYPE(0x2)
		| DMA4_CSDP_SRC_BURST_MODE(0)
		| DMA4_CSDP_DST_BURST_MODE(0)
		| DMA4_CSDP_SRC_ENDIANISM(0)
		| DMA4_CSDP_DST_ENDIANISM(0)
		| DMA4_CSDP_WRITE_MODE(0)
		| DMA4_CSDP_SRC_PACKED(0)
		| DMA4_CSDP_DST_PACKED(0);

	channel->reg_ccr = DMA4_CCR_DST_ADDRESS_MODE(1)
		| DMA4_CCR_SRC_ADDRESS_MODE(1)
		| DMA4_CCR_READ_PRIORITY(0)
		| DMA4_CCR_WRITE_PRIORITY(0)
		| DMA4_CCR_SYNC_TRIGGER(0)
		| DMA4_CCR_FRAME_SYNC(0)
		| DMA4_CCR_BLOCK_SYNC(0);

	channel->reg_cicr = DMA4_CICR_TRANS_ERR_IE
		| DMA4_CICR_SECURE_ERR_IE
		| DMA4_CICR_SUPERVISOR_ERR_IE
		| DMA4_CICR_MISALIGNED_ADRS_ERR_IE;

	/* Clear all the channel registers, this should abort any transaction */
	for (addr = DMA4_CCR(*ch); addr <= DMA4_COLOR(*ch); addr += 4)
		ti_sdma_write_4(sc, addr, 0x00000000);

	TI_SDMA_UNLOCK(sc);

	return 0;
}
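
/*
 * Illustrative sketch (not part of the original driver): a consumer typically
 * passes a callback when activating a channel so it is notified from
 * ti_sdma_intr() when the transfer completes or fails.  The callback name
 * my_dma_done and the my_softc argument below are hypothetical; the status
 * bits are the DMA4_CSR_* values passed by the interrupt handler above.
 *
 *	static void
 *	my_dma_done(unsigned int ch, uint32_t status, void *data)
 *	{
 *		if (status & (DMA4_CSR_TRANS_ERR | DMA4_CSR_SECURE_ERR))
 *			printf("DMA error on channel %u\n", ch);
 *		else
 *			wakeup(data);
 *	}
 *
 *	unsigned int ch;
 *	ti_sdma_activate_channel(&ch, my_dma_done, &my_softc);
 */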

/**
 *	ti_sdma_deactivate_channel - deactivates a channel
 *	@ch: the channel to deactivate
 *
 *	Stops any transfer in progress, disables the channel's interrupts, clears
 *	its registers and marks the channel as free for reuse.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_deactivate_channel(unsigned int ch)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	unsigned int j;
	unsigned int addr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	/* First check if the channel is currently active */
	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EBUSY);
	}

	/* Mark the channel as inactive */
	sc->sc_active_channels &= ~(1 << ch);

	/* Disable all DMA interrupts for the channel. */
	ti_sdma_write_4(sc, DMA4_CICR(ch), 0);

	/* Make sure the DMA transfer is stopped. */
	ti_sdma_write_4(sc, DMA4_CCR(ch), 0);

	/* Clear the CSR register and IRQ status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
	for (j = 0; j < NUM_DMA_IRQS; j++) {
		ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
	}

	/* Clear all the channel registers, this should abort any transaction */
	for (addr = DMA4_CCR(ch); addr <= DMA4_COLOR(ch); addr += 4)
		ti_sdma_write_4(sc, addr, 0x00000000);

	TI_SDMA_UNLOCK(sc);

	return 0;
}

/**
 *	ti_sdma_disable_channel_irq - disables IRQ's on the given channel
 *	@ch: the channel to disable IRQ's on
 *
 *	Disable interrupt generation for the given channel.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_disable_channel_irq(unsigned int ch)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t irq_enable;
	unsigned int j;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	/* Disable all the individual error conditions */
	sc->sc_channel[ch].reg_cicr = 0x0000;
	ti_sdma_write_4(sc, DMA4_CICR(ch), 0x0000);

	/* Disable the channel interrupt enable */
	for (j = 0; j < NUM_DMA_IRQS; j++) {
		irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
		irq_enable &= ~(1 << ch);

		ti_sdma_write_4(sc, DMA4_IRQENABLE_L(j), irq_enable);
	}

	/* Indicate the registers need to be rewritten on the next transaction */
	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return (0);
}

/**
 *	ti_sdma_enable_channel_irq - enables IRQ's on the given channel
 *	@ch: the channel to enable IRQ's on
 *	@flags: bitmask of interrupt types to enable
 *
 *	Flags can be a bitmask of the following options:
 *		DMA_IRQ_FLAG_DROP
 *		DMA_IRQ_FLAG_HALF_FRAME_COMPL
 *		DMA_IRQ_FLAG_FRAME_COMPL
 *		DMA_IRQ_FLAG_START_LAST_FRAME
 *		DMA_IRQ_FLAG_BLOCK_COMPL
 *		DMA_IRQ_FLAG_ENDOF_PKT
 *		DMA_IRQ_FLAG_DRAIN
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_enable_channel_irq(unsigned int ch, uint32_t flags)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t irq_enable;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	/* Always enable the error interrupts if we have interrupts enabled */
	flags |= DMA4_CICR_TRANS_ERR_IE | DMA4_CICR_SECURE_ERR_IE |
	         DMA4_CICR_SUPERVISOR_ERR_IE | DMA4_CICR_MISALIGNED_ADRS_ERR_IE;

	sc->sc_channel[ch].reg_cicr = flags;

	/* Write the values to the register */
	ti_sdma_write_4(sc, DMA4_CICR(ch), flags);

	/* Enable the channel interrupt enable */
	irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(0));
	irq_enable |= (1 << ch);

	ti_sdma_write_4(sc, DMA4_IRQENABLE_L(0), irq_enable);

	/* Indicate the registers need to be rewritten on the next transaction */
	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
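
/*
 * Illustrative sketch (not from the original source): to get an interrupt and
 * callback at the end of a whole block transfer, a consumer would typically
 * request just the block-complete flag from the list above; the error
 * interrupt bits are OR'd in by ti_sdma_enable_channel_irq() itself.
 *
 *	ti_sdma_enable_channel_irq(ch, DMA_IRQ_FLAG_BLOCK_COMPL);
 */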

/**
 *	ti_sdma_get_channel_status - returns the status of a given channel
 *	@ch: the channel number to get the status of
 *	@status: upon return will contain the status bitmask, see below for possible
 *	         values.
 *
 *	      DMA_STATUS_DROP
 *	      DMA_STATUS_HALF
 *	      DMA_STATUS_FRAME
 *	      DMA_STATUS_LAST
 *	      DMA_STATUS_BLOCK
 *	      DMA_STATUS_SYNC
 *	      DMA_STATUS_PKT
 *	      DMA_STATUS_TRANS_ERR
 *	      DMA_STATUS_SECURE_ERR
 *	      DMA_STATUS_SUPERVISOR_ERR
 *	      DMA_STATUS_MISALIGNED_ADRS_ERR
 *	      DMA_STATUS_DRAIN_END
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_get_channel_status(unsigned int ch, uint32_t *status)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t csr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	TI_SDMA_UNLOCK(sc);

	csr = ti_sdma_read_4(sc, DMA4_CSR(ch));

	if (status != NULL)
		*status = csr;

	return (0);
}
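
/*
 * Illustrative sketch (not from the original source): when interrupts are not
 * used, completion can be detected by polling the status bitmask, here assumed
 * to be signalled by the block-complete bit.  The DELAY interval is arbitrary.
 *
 *	uint32_t status;
 *
 *	do {
 *		DELAY(10);
 *		ti_sdma_get_channel_status(ch, &status);
 *	} while ((status & DMA_STATUS_BLOCK) == 0);
 */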

/**
 *	ti_sdma_start_xfer - starts a DMA transfer
 *	@ch: the channel number to use for the transfer
 *	@src_paddr: the source physical address
 *	@dst_paddr: the destination physical address
 *	@frmcnt: the number of frames per block
 *	@elmcnt: the number of elements in a frame, an element is either an 8, 16
 *           or 32-bit value as defined by ti_sdma_set_xfer_data_type()
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_start_xfer(unsigned int ch, unsigned int src_paddr,
                    unsigned long dst_paddr,
                    unsigned int frmcnt, unsigned int elmcnt)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	struct ti_sdma_channel *channel;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	channel = &sc->sc_channel[ch];

	/* a) Write the CSDP register */
	ti_sdma_write_4(sc, DMA4_CSDP(ch),
	    channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));

	/* b) Set the number of elements per frame CEN[23:0] */
	ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);

	/* c) Set the number of frames per block CFN[15:0] */
	ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);

	/* d) Set the source/dest start address index CSSA[31:0]/CDSA[31:0] */
	ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
	ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);

	/* e) Write the CCR register */
	ti_sdma_write_4(sc, DMA4_CCR(ch), channel->reg_ccr);

	/* f)  - Set the source element index increment CSEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);

	/*     - Set the source frame index increment CSFI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSF(ch), 0x0001);

	/*     - Set the destination element index increment CDEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);

	/*     - Set the destination frame index increment CDFI[31:0] */
	ti_sdma_write_4(sc, DMA4_CDF(ch), 0x0001);

	/* Clear the status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);

	/* Write the start-bit and away we go */
	ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
	ccr |= (1 << 7);
	ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);

	/* Clear the reg write flag */
	channel->need_reg_write = 0;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
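
/*
 * Illustrative sketch (not from the original source): the total transfer size
 * is frmcnt * elmcnt elements, with the element width chosen via
 * ti_sdma_set_xfer_data_type().  For example, copying a hypothetical 4 KiB
 * buffer as 32-bit elements in a single frame:
 *
 *	ti_sdma_set_xfer_data_type(ch, DMA_DATA_32BITS_SCALAR);
 *	ti_sdma_start_xfer(ch, src_paddr, dst_paddr, 1, 4096 / 4);
 */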

/**
 *	ti_sdma_start_xfer_packet - starts a packet DMA transfer
 *	@ch: the channel number to use for the transfer
 *	@src_paddr: the source physical address
 *	@dst_paddr: the destination physical address
 *	@frmcnt: the number of frames to transfer
 *	@elmcnt: the number of elements in a frame, an element is either an 8, 16
 *           or 32-bit value as defined by ti_sdma_set_xfer_data_type()
 *	@pktsize: the number of elements in each transfer packet
 *
 *	The @frmcnt and @elmcnt define the overall number of bytes to transfer,
 *	typically @frmcnt is 1 and @elmcnt contains the total number of elements.
 *	@pktsize is the size of each individual packet, there might be multiple
 *	packets per transfer.  i.e. for the following with element size of 32-bits
 *
 *		frmcnt = 1, elmcnt = 512, pktsize = 128
 *
 *	       Total transfer size  = 1 * 512 = 512 elements or 2048 bytes
 *	       Packets transferred  = 512 / 128 = 4
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_start_xfer_packet(unsigned int ch, unsigned int src_paddr,
                           unsigned long dst_paddr, unsigned int frmcnt,
                           unsigned int elmcnt, unsigned int pktsize)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	struct ti_sdma_channel *channel;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	channel = &sc->sc_channel[ch];

	/* a) Write the CSDP register */
	if (channel->need_reg_write)
		ti_sdma_write_4(sc, DMA4_CSDP(ch),
		    channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));

	/* b) Set the number of elements to transfer CEN[23:0] */
	ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);

	/* c) Set the number of frames to transfer CFN[15:0] */
	ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);

	/* d) Set the source/dest start address index CSSA[31:0]/CDSA[31:0] */
	ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
	ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);

	/* e) Write the CCR register */
	ti_sdma_write_4(sc, DMA4_CCR(ch),
	    channel->reg_ccr | DMA4_CCR_PACKET_TRANS);

	/* f)  - Set the source element index increment CSEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);

	/*     - Set the packet size, this is dependent on the sync source */
	if (channel->reg_ccr & DMA4_CCR_SEL_SRC_DST_SYNC(1))
		ti_sdma_write_4(sc, DMA4_CSF(ch), pktsize);
	else
		ti_sdma_write_4(sc, DMA4_CDF(ch), pktsize);

	/*     - Set the destination element index increment CDEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);

	/* Clear the status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);

	/* Write the start-bit and away we go */
	ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
	ccr |= (1 << 7);
	ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);

	/* Clear the reg write flag */
	channel->need_reg_write = 0;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
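
/*
 * Illustrative sketch (not from the original source): a packet transfer of
 * 512 32-bit elements delivered in 128-element packets, matching the worked
 * example in the comment above (4 packets in total).  The end-of-packet IRQ
 * flag is taken from the list documented at ti_sdma_enable_channel_irq().
 *
 *	ti_sdma_enable_channel_irq(ch, DMA_IRQ_FLAG_ENDOF_PKT);
 *	ti_sdma_start_xfer_packet(ch, src_paddr, dst_paddr, 1, 512, 128);
 */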

/**
 *	ti_sdma_stop_xfer - stops any currently active transfers
 *	@ch: the channel to stop the transfer on
 *
 *	This function call is effectively a NOP if no transaction is in progress.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_stop_xfer(unsigned int ch)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	unsigned int j;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	/* Disable all DMA interrupts for the channel. */
	ti_sdma_write_4(sc, DMA4_CICR(ch), 0);

	/* Make sure the DMA transfer is stopped. */
	ti_sdma_write_4(sc, DMA4_CCR(ch), 0);

	/* Clear the CSR register and IRQ status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
	for (j = 0; j < NUM_DMA_IRQS; j++) {
		ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
	}

	/* Configuration registers need to be re-written on the next xfer */
	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return (0);
}

/**
 *	ti_sdma_set_xfer_endianess - sets the endianness of subsequent transfers
 *	@ch: the channel number to set the endianness of
 *	@src: the source endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
 *	@dst: the destination endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_set_xfer_endianess(unsigned int ch, unsigned int src, unsigned int dst)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_ENDIANISM(1);
	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_ENDIANISM(src);

	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_ENDIANISM(1);
	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_ENDIANISM(dst);

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}

/**
 *	ti_sdma_set_xfer_burst - sets the source and destination burst size
 *	@ch: the channel number to set the burst settings of
 *	@src: the source burst size (either DMA_BURST_NONE, DMA_BURST_16, DMA_BURST_32
 *	      or DMA_BURST_64)
 *	@dst: the destination burst size (either DMA_BURST_NONE, DMA_BURST_16,
 *	      DMA_BURST_32 or DMA_BURST_64)
 *
 *	This function sets the burst size used for all subsequent transfers.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_set_xfer_burst(unsigned int ch, unsigned int src, unsigned int dst)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_BURST_MODE(0x3);
	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_BURST_MODE(src);

	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_BURST_MODE(0x3);
	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_BURST_MODE(dst);

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}

/**
 *	ti_sdma_set_xfer_data_type - sets the element data type for transfers
 *	@ch: the channel number to set the data type of
 *	@type: the xfer data type (either DMA_DATA_8BITS_SCALAR, DMA_DATA_16BITS_SCALAR
 *	       or DMA_DATA_32BITS_SCALAR)
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_set_xfer_data_type(unsigned int ch, unsigned int type)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DATA_TYPE(0x3);
	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DATA_TYPE(type);

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}

/**
 *	ti_sdma_set_callback - sets the callback function for a channel
 *	@ch: the channel to set the callback on
 *	@callback: function called when an interrupt is tripped on the channel
 *	@data: optional data passed to the callback
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_set_callback(unsigned int ch,
                      void (*callback)(unsigned int ch, uint32_t status, void *data),
                      void *data)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	sc->sc_channel[ch].callback = callback;
	sc->sc_channel[ch].callback_data = data;

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}

/**
 *	ti_sdma_sync_params - sets channel sync settings
 *	@ch: the channel number to set the sync on
 *	@trigger: the number of the sync trigger, this depends on what other H/W
 *	          module is triggering/receiving the DMA transactions
 *	@mode: flags describing the sync mode to use, it may have one or more of
 *	          the following bits set; TI_SDMA_SYNC_FRAME,
 *	          TI_SDMA_SYNC_BLOCK, TI_SDMA_SYNC_TRIG_ON_SRC.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_sync_params(unsigned int ch, unsigned int trigger, unsigned int mode)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	ccr = sc->sc_channel[ch].reg_ccr;

	ccr &= ~DMA4_CCR_SYNC_TRIGGER(0x7F);
	ccr |= DMA4_CCR_SYNC_TRIGGER(trigger + 1);

	if (mode & TI_SDMA_SYNC_FRAME)
		ccr |= DMA4_CCR_FRAME_SYNC(1);
	else
		ccr &= ~DMA4_CCR_FRAME_SYNC(1);

	if (mode & TI_SDMA_SYNC_BLOCK)
		ccr |= DMA4_CCR_BLOCK_SYNC(1);
	else
		ccr &= ~DMA4_CCR_BLOCK_SYNC(1);

	if (mode & TI_SDMA_SYNC_TRIG_ON_SRC)
		ccr |= DMA4_CCR_SEL_SRC_DST_SYNC(1);
	else
		ccr &= ~DMA4_CCR_SEL_SRC_DST_SYNC(1);

	sc->sc_channel[ch].reg_ccr = ccr;

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}
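
/*
 * Illustrative sketch (not from the original source): a driver reading from a
 * peripheral FIFO would typically pick the peripheral's DMA request line as
 * the trigger and synchronize on the source.  MY_PERIPH_DMA_REQ below is a
 * hypothetical request number that depends on the SoC's DMA request mapping.
 *
 *	ti_sdma_sync_params(ch, MY_PERIPH_DMA_REQ,
 *	    TI_SDMA_SYNC_FRAME | TI_SDMA_SYNC_TRIG_ON_SRC);
 */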

/**
 *	ti_sdma_set_addr_mode - sets the addressing mode for transfers
 *	@ch: the channel number to set the addressing mode of
 *	@src_mode: the xfer source addressing mode (either DMA_ADDR_CONSTANT,
 *	          DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
 *	          DMA_ADDR_DOUBLE_INDEX)
 *	@dst_mode: the xfer destination addressing mode (either DMA_ADDR_CONSTANT,
 *	          DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
 *	          DMA_ADDR_DOUBLE_INDEX)
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_set_addr_mode(unsigned int ch, unsigned int src_mode,
                       unsigned int dst_mode)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	ccr = sc->sc_channel[ch].reg_ccr;

	ccr &= ~DMA4_CCR_SRC_ADDRESS_MODE(0x3);
	ccr |= DMA4_CCR_SRC_ADDRESS_MODE(src_mode);

	ccr &= ~DMA4_CCR_DST_ADDRESS_MODE(0x3);
	ccr |= DMA4_CCR_DST_ADDRESS_MODE(dst_mode);

	sc->sc_channel[ch].reg_ccr = ccr;

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}
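
/*
 * Illustrative sketch (not from the original source): when draining a
 * peripheral FIFO into memory, the source address typically stays constant
 * while the destination post-increments through the buffer.
 *
 *	ti_sdma_set_addr_mode(ch, DMA_ADDR_CONSTANT, DMA_ADDR_POST_INCREMENT);
 */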

/**
 *	ti_sdma_probe - driver probe function
 *	@dev: dma device handle
 *
 *	RETURNS:
 *	0 if the device is supported, otherwise ENXIO.
 */
static int
ti_sdma_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "ti,omap4430-sdma"))
		return (ENXIO);

	device_set_desc(dev, "TI sDMA Controller");
	return (0);
}

/**
 *	ti_sdma_attach - driver attach function
 *	@dev: dma device handle
 *
 *	Initialises memory mapping/pointers to the DMA register set and requests
 *	IRQs. This is effectively the setup function for the driver.
 *
 *	RETURNS:
 *	0 on success, otherwise an error code on failure.
 */
static int
ti_sdma_attach(device_t dev)
{
	struct ti_sdma_softc *sc = device_get_softc(dev);
	unsigned int timeout;
	unsigned int i;
	int      rid;
	void    *ihl;
	int      err;

	/* Setup the basics */
	sc->sc_dev = dev;

	/* No channels active at the moment */
	sc->sc_active_channels = 0x00000000;

	/* Mutex to protect the shared data structures */
	TI_SDMA_LOCK_INIT(sc);

	/* Get the memory resource for the register mapping */
	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL)
		panic("%s: Cannot map registers", device_get_name(dev));

	/* Enable the interface and functional clocks */
	ti_prcm_clk_enable(SDMA_CLK);

	/* Read the sDMA revision register and sanity check it's known */
	sc->sc_hw_rev = ti_sdma_read_4(sc, DMA4_REVISION);
	device_printf(dev, "sDMA revision %08x\n", sc->sc_hw_rev);

	if (!ti_sdma_is_omap4_rev(sc) && !ti_sdma_is_omap3_rev(sc)) {
		device_printf(sc->sc_dev, "error - unknown sDMA H/W revision\n");
		return (EINVAL);
	}

	/* Disable all interrupts */
	for (i = 0; i < NUM_DMA_IRQS; i++) {
		ti_sdma_write_4(sc, DMA4_IRQENABLE_L(i), 0x00000000);
	}

	/* Soft-reset is only supported on pre-OMAP44xx devices */
	if (ti_sdma_is_omap3_rev(sc)) {
		/* Soft-reset */
		ti_sdma_write_4(sc, DMA4_OCP_SYSCONFIG, 0x0002);

		/* Set the timeout to 100ms */
		timeout = (hz < 10) ? 1 : ((100 * hz) / 1000);

		/* Wait for DMA reset to complete */
		while ((ti_sdma_read_4(sc, DMA4_SYSSTATUS) & 0x1) == 0x0) {
			/* Sleep for a tick */
			pause("DMARESET", 1);

			if (timeout-- == 0) {
				device_printf(sc->sc_dev, "sDMA reset operation timed out\n");
				return (EINVAL);
			}
		}
	}

	/*
	 * Install interrupt handlers for the four possible interrupts. Any channel
	 * can trip one of the four IRQs.
	 */
	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq_res == NULL)
		panic("Unable to setup the dma irq handler.\n");

	err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, ti_sdma_intr, NULL, &ihl);
	if (err)
		panic("%s: Cannot register IRQ", device_get_name(dev));

	/* Store the DMA structure globally ... this driver should never be unloaded */
	ti_sdma_sc = sc;

	return (0);
}

static device_method_t ti_sdma_methods[] = {
	DEVMETHOD(device_probe, ti_sdma_probe),
	DEVMETHOD(device_attach, ti_sdma_attach),
	{0, 0},
};

static driver_t ti_sdma_driver = {
	"ti_sdma",
	ti_sdma_methods,
	sizeof(struct ti_sdma_softc),
};
static devclass_t ti_sdma_devclass;

DRIVER_MODULE(ti_sdma, simplebus, ti_sdma_driver, ti_sdma_devclass, 0, 0);
MODULE_DEPEND(ti_sdma, ti_prcm, 1, 1, 1);