xref: /freebsd/sys/arm/ti/ti_sdma.c (revision 61ba55bcf70f2340f9c943c9571113b3fd8eda69)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2011
5  *	Ben Gray <ben.r.gray@gmail.com>.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/bus.h>
34 #include <sys/kernel.h>
35 #include <sys/lock.h>
36 #include <sys/interrupt.h>
37 #include <sys/module.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/rman.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/timetc.h>
44 #include <machine/bus.h>
45 #include <machine/intr.h>
46 
47 #include <dev/ofw/openfirm.h>
48 #include <dev/ofw/ofw_bus.h>
49 #include <dev/ofw/ofw_bus_subr.h>
50 
51 #include <arm/ti/ti_cpuid.h>
52 #include <arm/ti/ti_sysc.h>
53 #include <arm/ti/ti_sdma.h>
54 #include <arm/ti/ti_sdmareg.h>
55 
56 /**
57  *	Kernel functions for using the DMA controller
58  *
59  *
60  *	DMA TRANSFERS:
61  *	A DMA transfer block consists of a number of frames (FN). Each frame
62  *	consists of a number of elements, and each element can have a size of 8, 16,
63  *	or 32 bits.
64  *
65  *	OMAP44xx and newer chips support linked list (aka scatter gather) transfers,
66  *	where a linked list of source/destination pairs can be placed in memory
67  *	for the H/W to process.  Earlier chips only allowed you to chain multiple
68  *	channels together.  However currently this linked list feature is not
69  *	supported by the driver.
70  *
71  */
72 
/**
 *	Data structure per DMA channel.
 *
 *	Software cache of the per-channel configuration; values are only
 *	flushed to the hardware registers when a transfer is started.
 */
struct ti_sdma_channel {
	/*
	 * The configuration registers for the given channel, these are modified
	 * by the set functions and only written to the actual registers when a
	 * transaction is started.
	 */
	uint32_t		reg_csdp;	/* channel src/dst parameters (CSDP) */
	uint32_t		reg_ccr;	/* channel control (CCR) */
	uint32_t		reg_cicr;	/* channel interrupt control (CICR) */

	/* Set when one of the configuration registers above change */
	uint32_t		need_reg_write;

	/* Callback function used when an interrupt is tripped on the given channel */
	void (*callback)(unsigned int ch, uint32_t ch_status, void *data);

	/* Callback data passed in the callback ... duh */
	void*			callback_data;

};
98 
/**
 *	DMA driver context, allocated and stored globally, this driver is not
 *	intended to ever be unloaded (see ti_sdma_sc).
 *
 */
struct ti_sdma_softc {
	device_t		sc_dev;		/* newbus device handle */
	struct resource*	sc_irq_res;	/* DMA controller IRQ resource */
	struct resource*	sc_mem_res;	/* DMA register window */

	/*
	 * I guess in theory we should have a mutex per DMA channel for register
	 * modifications. But since we know we are never going to be run on a SMP
	 * system, we can use just the single lock for all channels.
	 */
	struct mtx		sc_mtx;

	/* Stores the H/W revision read from the registers */
	uint32_t		sc_hw_rev;

	/*
	 * Bits in the sc_active_channels data field indicate if the channel has
	 * been activated.
	 */
	uint32_t		sc_active_channels;

	struct ti_sdma_channel sc_channel[NUM_DMA_CHANNELS];

};
128 
129 static struct ti_sdma_softc *ti_sdma_sc = NULL;
130 
/**
 *	Macros for driver mutex locking.
 *
 *	A spin mutex is used because the same lock is taken from the interrupt
 *	handler (ti_sdma_intr) as well as from the API entry points.
 */
#define TI_SDMA_LOCK(_sc)             mtx_lock_spin(&(_sc)->sc_mtx)
#define TI_SDMA_UNLOCK(_sc)           mtx_unlock_spin(&(_sc)->sc_mtx)
#define TI_SDMA_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	         "ti_sdma", MTX_SPIN)
#define TI_SDMA_LOCK_DESTROY(_sc)     mtx_destroy(&_sc->sc_mtx);
#define TI_SDMA_ASSERT_LOCKED(_sc)    mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define TI_SDMA_ASSERT_UNLOCKED(_sc)  mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
142 
143 /**
144  *	Function prototypes
145  *
146  */
147 static void ti_sdma_intr(void *);
148 
/**
 *	ti_sdma_read_4 - reads a 32-bit value from one of the DMA registers
 *	@sc: DMA device context
 *	@off: The offset of a register from the DMA register address range
 *
 *
 *	RETURNS:
 *	32-bit value read from the register.
 */
static inline uint32_t
ti_sdma_read_4(struct ti_sdma_softc *sc, bus_size_t off)
{
	return bus_read_4(sc->sc_mem_res, off);
}
163 
/**
 *	ti_sdma_write_4 - writes a 32-bit value to one of the DMA registers
 *	@sc: DMA device context
 *	@off: The offset of a register from the DMA register address range
 *	@val: The 32-bit value to write
 *
 *	RETURNS:
 *	nothing
 */
static inline void
ti_sdma_write_4(struct ti_sdma_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->sc_mem_res, off, val);
}
178 
/**
 *	ti_sdma_is_omap3_rev - returns true if H/W is from OMAP3 series
 *	@sc: DMA device context
 *
 *	RETURNS:
 *	non-zero if the cached H/W revision matches the OMAP3 DMA4 revision.
 */
static inline int
ti_sdma_is_omap3_rev(struct ti_sdma_softc *sc)
{
	return (sc->sc_hw_rev == DMA4_OMAP3_REV);
}
189 
/**
 *	ti_sdma_is_omap4_rev - returns true if H/W is from OMAP4 series
 *	@sc: DMA device context
 *
 *	RETURNS:
 *	non-zero if the cached H/W revision matches the OMAP4 DMA4 revision.
 */
static inline int
ti_sdma_is_omap4_rev(struct ti_sdma_softc *sc)
{
	return (sc->sc_hw_rev == DMA4_OMAP4_REV);
}
200 
/**
 *	ti_sdma_intr - interrupt handler for all 4 DMA IRQs
 *	@arg: ignored
 *
 *	Called when any of the four DMA IRQs are triggered.  Walks each IRQ
 *	line's status register, reports any error conditions latched in the
 *	channel CSR, acknowledges the interrupt and invokes the per-channel
 *	callback (if installed).
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	nothing
 */
static void
ti_sdma_intr(void *arg)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t intr;
	uint32_t csr;
	unsigned int ch, j;
	struct ti_sdma_channel* channel;

	TI_SDMA_LOCK(sc);

	for (j = 0; j < NUM_DMA_IRQS; j++) {
		/* Get the flag interrupts (enabled) */
		intr = ti_sdma_read_4(sc, DMA4_IRQSTATUS_L(j));
		intr &= ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
		if (intr == 0x00000000)
			continue;

		/* Loop through checking the status bits */
		for (ch = 0; ch < NUM_DMA_CHANNELS; ch++) {
			if (intr & (1 << ch)) {
				channel = &sc->sc_channel[ch];

				/* Read the CSR register and verify we don't have a spurious IRQ */
				csr = ti_sdma_read_4(sc, DMA4_CSR(ch));
				if (csr == 0) {
					device_printf(sc->sc_dev, "Spurious DMA IRQ for channel "
					              "%d\n", ch);
					continue;
				}

				/* Sanity check this channel is active */
				if ((sc->sc_active_channels & (1 << ch)) == 0) {
					device_printf(sc->sc_dev, "IRQ %d for a non-activated "
					              "channel %d\n", j, ch);
					continue;
				}

				/* Check the status error codes */
				if (csr & DMA4_CSR_DROP)
					device_printf(sc->sc_dev, "Synchronization event drop "
					              "occurred during the transfer on channel %u\n",
								  ch);
				if (csr & DMA4_CSR_SECURE_ERR)
					device_printf(sc->sc_dev, "Secure transaction error event "
					              "on channel %u\n", ch);
				if (csr & DMA4_CSR_MISALIGNED_ADRS_ERR)
					device_printf(sc->sc_dev, "Misaligned address error event "
					              "on channel %u\n", ch);
				if (csr & DMA4_CSR_TRANS_ERR) {
					device_printf(sc->sc_dev, "Transaction error event on "
					              "channel %u\n", ch);
					/*
					 * Apparently according to linux code, there is an errata
					 * that says the channel is not disabled upon this error.
					 * They explicitly disable the channel here .. since I
					 * haven't seen the errata, I'm going to ignore for now.
					 */
				}

				/* Clear the status flags for the IRQ */
				ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
				ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));

				/* Call the callback for the given channel.  Note: the
				 * callback runs with the spin lock held. */
				if (channel->callback)
					channel->callback(ch, csr, channel->callback_data);
			}
		}
	}

	TI_SDMA_UNLOCK(sc);

	return;
}
288 
289 /**
290  *	ti_sdma_activate_channel - activates a DMA channel
291  *	@ch: upon return contains the channel allocated
292  *	@callback: a callback function to associate with the channel
293  *	@data: optional data supplied when the callback is called
294  *
295  *	Simply activates a channel be enabling and writing default values to the
296  *	channel's register set.  It doesn't start a transaction, just populates the
297  *	internal data structures and sets defaults.
298  *
299  *	Note this function doesn't enable interrupts, for that you need to call
300  *	ti_sdma_enable_channel_irq(). If not using IRQ to detect the end of the
301  *	transfer, you can use ti_sdma_status_poll() to detect a change in the
302  *	status.
303  *
304  *	A channel must be activated before any of the other DMA functions can be
305  *	called on it.
306  *
307  *	LOCKING:
308  *	DMA registers protected by internal mutex
309  *
310  *	RETURNS:
311  *	0 on success, otherwise an error code
312  */
313 int
314 ti_sdma_activate_channel(unsigned int *ch,
315                           void (*callback)(unsigned int ch, uint32_t status, void *data),
316                           void *data)
317 {
318 	struct ti_sdma_softc *sc = ti_sdma_sc;
319 	struct ti_sdma_channel *channel = NULL;
320 	uint32_t addr;
321 	unsigned int i;
322 
323 	/* Sanity check */
324 	if (sc == NULL)
325 		return (ENOMEM);
326 
327 	if (ch == NULL)
328 		return (EINVAL);
329 
330 	TI_SDMA_LOCK(sc);
331 
332 	/* Check to see if all channels are in use */
333 	if (sc->sc_active_channels == 0xffffffff) {
334 		TI_SDMA_UNLOCK(sc);
335 		return (ENOMEM);
336 	}
337 
338 	/* Find the first non-active channel */
339 	for (i = 0; i < NUM_DMA_CHANNELS; i++) {
340 		if (!(sc->sc_active_channels & (0x1 << i))) {
341 			sc->sc_active_channels |= (0x1 << i);
342 			*ch = i;
343 			break;
344 		}
345 	}
346 
347 	/* Get the channel struct and populate the fields */
348 	channel = &sc->sc_channel[*ch];
349 
350 	channel->callback = callback;
351 	channel->callback_data = data;
352 
353 	channel->need_reg_write = 1;
354 
355 	/* Set the default configuration for the DMA channel */
356 	channel->reg_csdp = DMA4_CSDP_DATA_TYPE(0x2)
357 		| DMA4_CSDP_SRC_BURST_MODE(0)
358 		| DMA4_CSDP_DST_BURST_MODE(0)
359 		| DMA4_CSDP_SRC_ENDIANISM(0)
360 		| DMA4_CSDP_DST_ENDIANISM(0)
361 		| DMA4_CSDP_WRITE_MODE(0)
362 		| DMA4_CSDP_SRC_PACKED(0)
363 		| DMA4_CSDP_DST_PACKED(0);
364 
365 	channel->reg_ccr = DMA4_CCR_DST_ADDRESS_MODE(1)
366 		| DMA4_CCR_SRC_ADDRESS_MODE(1)
367 		| DMA4_CCR_READ_PRIORITY(0)
368 		| DMA4_CCR_WRITE_PRIORITY(0)
369 		| DMA4_CCR_SYNC_TRIGGER(0)
370 		| DMA4_CCR_FRAME_SYNC(0)
371 		| DMA4_CCR_BLOCK_SYNC(0);
372 
373 	channel->reg_cicr = DMA4_CICR_TRANS_ERR_IE
374 		| DMA4_CICR_SECURE_ERR_IE
375 		| DMA4_CICR_SUPERVISOR_ERR_IE
376 		| DMA4_CICR_MISALIGNED_ADRS_ERR_IE;
377 
378 	/* Clear all the channel registers, this should abort any transaction */
379 	for (addr = DMA4_CCR(*ch); addr <= DMA4_COLOR(*ch); addr += 4)
380 		ti_sdma_write_4(sc, addr, 0x00000000);
381 
382 	TI_SDMA_UNLOCK(sc);
383 
384 	return 0;
385 }
386 
/**
 *	ti_sdma_deactivate_channel - deactivates a channel
 *	@ch: the channel to deactivate
 *
 *	Stops any transfer in progress, disables and acknowledges all interrupts
 *	for the channel, clears the channel's register set and marks the channel
 *	as free for re-allocation.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, ENOMEM if the driver isn't loaded, EBUSY if the channel
 *	was not active.
 */
int
ti_sdma_deactivate_channel(unsigned int ch)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	unsigned int j;
	unsigned int addr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	/* First check if the channel is currently active */
	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EBUSY);
	}

	/* Mark the channel as inactive */
	sc->sc_active_channels &= ~(1 << ch);

	/* Disable all DMA interrupts for the channel. */
	ti_sdma_write_4(sc, DMA4_CICR(ch), 0);

	/* Make sure the DMA transfer is stopped. */
	ti_sdma_write_4(sc, DMA4_CCR(ch), 0);

	/* Clear the CSR register and IRQ status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
	for (j = 0; j < NUM_DMA_IRQS; j++) {
		ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
	}

	/* Clear all the channel registers, this should abort any transaction */
	for (addr = DMA4_CCR(ch); addr <= DMA4_COLOR(ch); addr += 4)
		ti_sdma_write_4(sc, addr, 0x00000000);

	TI_SDMA_UNLOCK(sc);

	return 0;
}
441 
442 /**
443  *	ti_sdma_disable_channel_irq - disables IRQ's on the given channel
444  *	@ch: the channel to disable IRQ's on
445  *
446  *	Disable interrupt generation for the given channel.
447  *
448  *	LOCKING:
449  *	DMA registers protected by internal mutex
450  *
451  *	RETURNS:
452  *	EH_HANDLED or EH_NOT_HANDLED
453  */
454 int
455 ti_sdma_disable_channel_irq(unsigned int ch)
456 {
457 	struct ti_sdma_softc *sc = ti_sdma_sc;
458 	uint32_t irq_enable;
459 	unsigned int j;
460 
461 	/* Sanity check */
462 	if (sc == NULL)
463 		return (ENOMEM);
464 
465 	TI_SDMA_LOCK(sc);
466 
467 	if ((sc->sc_active_channels & (1 << ch)) == 0) {
468 		TI_SDMA_UNLOCK(sc);
469 		return (EINVAL);
470 	}
471 
472 	/* Disable all the individual error conditions */
473 	sc->sc_channel[ch].reg_cicr = 0x0000;
474 	ti_sdma_write_4(sc, DMA4_CICR(ch), 0x0000);
475 
476 	/* Disable the channel interrupt enable */
477 	for (j = 0; j < NUM_DMA_IRQS; j++) {
478 		irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
479 		irq_enable &= ~(1 << ch);
480 
481 		ti_sdma_write_4(sc, DMA4_IRQENABLE_L(j), irq_enable);
482 	}
483 
484 	/* Indicate the registers need to be rewritten on the next transaction */
485 	sc->sc_channel[ch].need_reg_write = 1;
486 
487 	TI_SDMA_UNLOCK(sc);
488 
489 	return (0);
490 }
491 
/**
 *	ti_sdma_enable_channel_irq - enables IRQ's on the given channel
 *	@ch: the channel to enable IRQ's on
 *	@flags: bitmask of interrupt types to enable
 *
 *	Flags can be a bitmask of the following options:
 *		DMA_IRQ_FLAG_DROP
 *		DMA_IRQ_FLAG_HALF_FRAME_COMPL
 *		DMA_IRQ_FLAG_FRAME_COMPL
 *		DMA_IRQ_FLAG_START_LAST_FRAME
 *		DMA_IRQ_FLAG_BLOCK_COMPL
 *		DMA_IRQ_FLAG_ENDOF_PKT
 *		DMA_IRQ_FLAG_DRAIN
 *
 *	The error interrupts are always enabled in addition to @flags.  Note the
 *	channel is only added to IRQ line 0's enable mask.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, ENOMEM if the driver isn't loaded, EINVAL if the channel
 *	is not active.
 */
int
ti_sdma_enable_channel_irq(unsigned int ch, uint32_t flags)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t irq_enable;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	/* Always enable the error interrupts if we have interrupts enabled */
	flags |= DMA4_CICR_TRANS_ERR_IE | DMA4_CICR_SECURE_ERR_IE |
	         DMA4_CICR_SUPERVISOR_ERR_IE | DMA4_CICR_MISALIGNED_ADRS_ERR_IE;

	sc->sc_channel[ch].reg_cicr = flags;

	/* Write the values to the register */
	ti_sdma_write_4(sc, DMA4_CICR(ch), flags);

	/* Enable the channel interrupt enable */
	irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(0));
	irq_enable |= (1 << ch);

	ti_sdma_write_4(sc, DMA4_IRQENABLE_L(0), irq_enable);

	/* Indicate the registers need to be rewritten on the next transaction */
	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
552 
553 /**
554  *	ti_sdma_get_channel_status - returns the status of a given channel
555  *	@ch: the channel number to get the status of
556  *	@status: upon return will contain the status bitmask, see below for possible
557  *	         values.
558  *
559  *	      DMA_STATUS_DROP
560  *	      DMA_STATUS_HALF
561  *	      DMA_STATUS_FRAME
562  *	      DMA_STATUS_LAST
563  *	      DMA_STATUS_BLOCK
564  *	      DMA_STATUS_SYNC
565  *	      DMA_STATUS_PKT
566  *	      DMA_STATUS_TRANS_ERR
567  *	      DMA_STATUS_SECURE_ERR
568  *	      DMA_STATUS_SUPERVISOR_ERR
569  *	      DMA_STATUS_MISALIGNED_ADRS_ERR
570  *	      DMA_STATUS_DRAIN_END
571  *
572  *
573  *	LOCKING:
574  *	DMA registers protected by internal mutex
575  *
576  *	RETURNS:
577  *	EH_HANDLED or EH_NOT_HANDLED
578  */
579 int
580 ti_sdma_get_channel_status(unsigned int ch, uint32_t *status)
581 {
582 	struct ti_sdma_softc *sc = ti_sdma_sc;
583 	uint32_t csr;
584 
585 	/* Sanity check */
586 	if (sc == NULL)
587 		return (ENOMEM);
588 
589 	TI_SDMA_LOCK(sc);
590 
591 	if ((sc->sc_active_channels & (1 << ch)) == 0) {
592 		TI_SDMA_UNLOCK(sc);
593 		return (EINVAL);
594 	}
595 
596 	TI_SDMA_UNLOCK(sc);
597 
598 	csr = ti_sdma_read_4(sc, DMA4_CSR(ch));
599 
600 	if (status != NULL)
601 		*status = csr;
602 
603 	return (0);
604 }
605 
/**
 *	ti_sdma_start_xfer - starts a DMA transfer
 *	@ch: the channel number to start the transfer on
 *	@src_paddr: the source physical address
 *	@dst_paddr: the destination physical address
 *	@frmcnt: the number of frames per block
 *	@elmcnt: the number of elements in a frame, an element is either an 8, 16
 *           or 32-bit value as defined by ti_sdma_set_xfer_data_type()
 *
 *	Programs the channel registers from the cached configuration (steps a-f
 *	follow the DMA4 programming sequence) and then sets the CCR enable bit
 *	to kick off the transfer.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, ENOMEM if the driver isn't loaded, EINVAL if the channel
 *	is not active.
 */
int
ti_sdma_start_xfer(unsigned int ch, unsigned int src_paddr,
                    unsigned long dst_paddr,
                    unsigned int frmcnt, unsigned int elmcnt)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	struct ti_sdma_channel *channel;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	channel = &sc->sc_channel[ch];

	/* a) Write the CSDP register */
	/* NOTE(review): written unconditionally here, whereas
	 * ti_sdma_start_xfer_packet() checks need_reg_write first — confirm
	 * whether the check was intentionally omitted. */
	ti_sdma_write_4(sc, DMA4_CSDP(ch),
	    channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));

	/* b) Set the number of element per frame CEN[23:0] */
	ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);

	/* c) Set the number of frame per block CFN[15:0] */
	ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);

	/* d) Set the Source/dest start address index CSSA[31:0]/CDSA[31:0] */
	ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
	ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);

	/* e) Write the CCR register */
	ti_sdma_write_4(sc, DMA4_CCR(ch), channel->reg_ccr);

	/* f)  - Set the source element index increment CSEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);

	/*     - Set the source frame index increment CSFI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSF(ch), 0x0001);

	/*     - Set the destination element index increment CDEI[15:0]*/
	ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);

	/* - Set the destination frame index increment CDFI[31:0] */
	ti_sdma_write_4(sc, DMA4_CDF(ch), 0x0001);

	/* Clear the status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);

	/* Write the start-bit and away we go */
	ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
	ccr |= (1 << 7);
	ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);

	/* Clear the reg write flag */
	channel->need_reg_write = 0;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
688 
/**
 *	ti_sdma_start_xfer_packet - starts a packet DMA transfer
 *	@ch: the channel number to use for the transfer
 *	@src_paddr: the source physical address
 *	@dst_paddr: the destination physical address
 *	@frmcnt: the number of frames to transfer
 *	@elmcnt: the number of elements in a frame, an element is either an 8, 16
 *           or 32-bit value as defined by ti_sdma_set_xfer_data_type()
 *	@pktsize: the number of elements in each transfer packet
 *
 *	The @frmcnt and @elmcnt define the overall number of bytes to transfer,
 *	typically @frmcnt is 1 and @elmcnt contains the total number of elements.
 *	@pktsize is the size of each individual packet, there might be multiple
 *	packets per transfer.  i.e. for the following with element size of 32-bits
 *
 *		frmcnt = 1, elmcnt = 512, pktsize = 128
 *
 *	       Total transfer bytes = 1 * 512 = 512 elements or 2048 bytes
 *	       Packets transferred   = 512 / 128 = 4
 *
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, ENOMEM if the driver isn't loaded, EINVAL if the channel
 *	is not active.
 */
int
ti_sdma_start_xfer_packet(unsigned int ch, unsigned int src_paddr,
                           unsigned long dst_paddr, unsigned int frmcnt,
                           unsigned int elmcnt, unsigned int pktsize)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	struct ti_sdma_channel *channel;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	channel = &sc->sc_channel[ch];

	/* a) Write the CSDP register (only if the cached config changed) */
	if (channel->need_reg_write)
		ti_sdma_write_4(sc, DMA4_CSDP(ch),
		    channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));

	/* b) Set the number of elements to transfer CEN[23:0] */
	ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);

	/* c) Set the number of frames to transfer CFN[15:0] */
	ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);

	/* d) Set the Source/dest start address index CSSA[31:0]/CDSA[31:0] */
	ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
	ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);

	/* e) Write the CCR register with the packet transfer mode set */
	ti_sdma_write_4(sc, DMA4_CCR(ch),
	    channel->reg_ccr | DMA4_CCR_PACKET_TRANS);

	/* f)  - Set the source element index increment CSEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);

	/*     - Set the packet size, this is dependent on the sync source */
	if (channel->reg_ccr & DMA4_CCR_SEL_SRC_DST_SYNC(1))
		ti_sdma_write_4(sc, DMA4_CSF(ch), pktsize);
	else
		ti_sdma_write_4(sc, DMA4_CDF(ch), pktsize);

	/* - Set the destination element index increment CDEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);

	/* Clear the status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);

	/* Write the start-bit and away we go */
	ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
	ccr |= (1 << 7);
	ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);

	/* Clear the reg write flag */
	channel->need_reg_write = 0;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
784 
/**
 *	ti_sdma_stop_xfer - stops any currently active transfers
 *	@ch: the channel to stop the transfer on
 *
 *	This function call is effectively a NOP if no transaction is in progress.
 *	The channel stays active; only the in-flight transfer and its pending
 *	interrupt state are cleared.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, ENOMEM if the driver isn't loaded, EINVAL if the channel
 *	is not active.
 */
int
ti_sdma_stop_xfer(unsigned int ch)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	unsigned int j;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	/* Disable all DMA interrupts for the channel. */
	ti_sdma_write_4(sc, DMA4_CICR(ch), 0);

	/* Make sure the DMA transfer is stopped. */
	ti_sdma_write_4(sc, DMA4_CCR(ch), 0);

	/* Clear the CSR register and IRQ status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
	for (j = 0; j < NUM_DMA_IRQS; j++) {
		ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
	}

	/* Configuration registers need to be re-written on the next xfer */
	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
833 
834 /**
835  *	ti_sdma_set_xfer_endianess - sets the endianness of subsequent transfers
836  *	@ch: the channel number to set the endianness of
837  *	@src: the source endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
838  *	@dst: the destination endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
839  *
840  *
841  *	LOCKING:
842  *	DMA registers protected by internal mutex
843  *
844  *	RETURNS:
845  *	EH_HANDLED or EH_NOT_HANDLED
846  */
847 int
848 ti_sdma_set_xfer_endianess(unsigned int ch, unsigned int src, unsigned int dst)
849 {
850 	struct ti_sdma_softc *sc = ti_sdma_sc;
851 
852 	/* Sanity check */
853 	if (sc == NULL)
854 		return (ENOMEM);
855 
856 	TI_SDMA_LOCK(sc);
857 
858 	if ((sc->sc_active_channels & (1 << ch)) == 0) {
859 		TI_SDMA_UNLOCK(sc);
860 		return (EINVAL);
861 	}
862 
863 	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_ENDIANISM(1);
864 	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_ENDIANISM(src);
865 
866 	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_ENDIANISM(1);
867 	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_ENDIANISM(dst);
868 
869 	sc->sc_channel[ch].need_reg_write = 1;
870 
871 	TI_SDMA_UNLOCK(sc);
872 
873 	return 0;
874 }
875 
876 /**
877  *	ti_sdma_set_xfer_burst - sets the source and destination element size
878  *	@ch: the channel number to set the burst settings of
879  *	@src: the source endianness (either DMA_BURST_NONE, DMA_BURST_16, DMA_BURST_32
880  *	      or DMA_BURST_64)
881  *	@dst: the destination endianness (either DMA_BURST_NONE, DMA_BURST_16,
882  *	      DMA_BURST_32 or DMA_BURST_64)
883  *
884  *	This function sets the size of the elements for all subsequent transfers.
885  *
886  *	LOCKING:
887  *	DMA registers protected by internal mutex
888  *
889  *	RETURNS:
890  *	EH_HANDLED or EH_NOT_HANDLED
891  */
892 int
893 ti_sdma_set_xfer_burst(unsigned int ch, unsigned int src, unsigned int dst)
894 {
895 	struct ti_sdma_softc *sc = ti_sdma_sc;
896 
897 	/* Sanity check */
898 	if (sc == NULL)
899 		return (ENOMEM);
900 
901 	TI_SDMA_LOCK(sc);
902 
903 	if ((sc->sc_active_channels & (1 << ch)) == 0) {
904 		TI_SDMA_UNLOCK(sc);
905 		return (EINVAL);
906 	}
907 
908 	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_BURST_MODE(0x3);
909 	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_BURST_MODE(src);
910 
911 	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_BURST_MODE(0x3);
912 	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_BURST_MODE(dst);
913 
914 	sc->sc_channel[ch].need_reg_write = 1;
915 
916 	TI_SDMA_UNLOCK(sc);
917 
918 	return 0;
919 }
920 
921 /**
922  *	ti_sdma_set_xfer_data_type - driver attach function
923  *	@ch: the channel number to set the endianness of
924  *	@type: the xfer data type (either DMA_DATA_8BITS_SCALAR, DMA_DATA_16BITS_SCALAR
925  *	       or DMA_DATA_32BITS_SCALAR)
926  *
927  *
928  *	LOCKING:
929  *	DMA registers protected by internal mutex
930  *
931  *	RETURNS:
932  *	EH_HANDLED or EH_NOT_HANDLED
933  */
934 int
935 ti_sdma_set_xfer_data_type(unsigned int ch, unsigned int type)
936 {
937 	struct ti_sdma_softc *sc = ti_sdma_sc;
938 
939 	/* Sanity check */
940 	if (sc == NULL)
941 		return (ENOMEM);
942 
943 	TI_SDMA_LOCK(sc);
944 
945 	if ((sc->sc_active_channels & (1 << ch)) == 0) {
946 		TI_SDMA_UNLOCK(sc);
947 		return (EINVAL);
948 	}
949 
950 	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DATA_TYPE(0x3);
951 	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DATA_TYPE(type);
952 
953 	sc->sc_channel[ch].need_reg_write = 1;
954 
955 	TI_SDMA_UNLOCK(sc);
956 
957 	return 0;
958 }
959 
960 /**
961  *	ti_sdma_set_callback - driver attach function
962  *	@dev: dma device handle
963  *
964  *
965  *
966  *	LOCKING:
967  *	DMA registers protected by internal mutex
968  *
969  *	RETURNS:
970  *	EH_HANDLED or EH_NOT_HANDLED
971  */
972 int
973 ti_sdma_set_callback(unsigned int ch,
974                       void (*callback)(unsigned int ch, uint32_t status, void *data),
975                       void *data)
976 {
977 	struct ti_sdma_softc *sc = ti_sdma_sc;
978 
979 	/* Sanity check */
980 	if (sc == NULL)
981 		return (ENOMEM);
982 
983 	TI_SDMA_LOCK(sc);
984 
985 	if ((sc->sc_active_channels & (1 << ch)) == 0) {
986 		TI_SDMA_UNLOCK(sc);
987 		return (EINVAL);
988 	}
989 
990 	sc->sc_channel[ch].callback = callback;
991 	sc->sc_channel[ch].callback_data = data;
992 
993 	sc->sc_channel[ch].need_reg_write = 1;
994 
995 	TI_SDMA_UNLOCK(sc);
996 
997 	return 0;
998 }
999 
1000 /**
1001  *	ti_sdma_sync_params - sets channel sync settings
1002  *	@ch: the channel number to set the sync on
1003  *	@trigger: the number of the sync trigger, this depends on what other H/W
1004  *	          module is triggering/receiving the DMA transactions
1005  *	@mode: flags describing the sync mode to use, it may have one or more of
1006  *	          the following bits set; TI_SDMA_SYNC_FRAME,
1007  *	          TI_SDMA_SYNC_BLOCK, TI_SDMA_SYNC_TRIG_ON_SRC.
1008  *
1009  *
1010  *
1011  *	LOCKING:
1012  *	DMA registers protected by internal mutex
1013  *
1014  *	RETURNS:
1015  *	EH_HANDLED or EH_NOT_HANDLED
1016  */
1017 int
1018 ti_sdma_sync_params(unsigned int ch, unsigned int trigger, unsigned int mode)
1019 {
1020 	struct ti_sdma_softc *sc = ti_sdma_sc;
1021 	uint32_t ccr;
1022 
1023 	/* Sanity check */
1024 	if (sc == NULL)
1025 		return (ENOMEM);
1026 
1027 	TI_SDMA_LOCK(sc);
1028 
1029 	if ((sc->sc_active_channels & (1 << ch)) == 0) {
1030 		TI_SDMA_UNLOCK(sc);
1031 		return (EINVAL);
1032 	}
1033 
1034 	ccr = sc->sc_channel[ch].reg_ccr;
1035 
1036 	ccr &= ~DMA4_CCR_SYNC_TRIGGER(0x7F);
1037 	ccr |= DMA4_CCR_SYNC_TRIGGER(trigger + 1);
1038 
1039 	if (mode & TI_SDMA_SYNC_FRAME)
1040 		ccr |= DMA4_CCR_FRAME_SYNC(1);
1041 	else
1042 		ccr &= ~DMA4_CCR_FRAME_SYNC(1);
1043 
1044 	if (mode & TI_SDMA_SYNC_BLOCK)
1045 		ccr |= DMA4_CCR_BLOCK_SYNC(1);
1046 	else
1047 		ccr &= ~DMA4_CCR_BLOCK_SYNC(1);
1048 
1049 	if (mode & TI_SDMA_SYNC_TRIG_ON_SRC)
1050 		ccr |= DMA4_CCR_SEL_SRC_DST_SYNC(1);
1051 	else
1052 		ccr &= ~DMA4_CCR_SEL_SRC_DST_SYNC(1);
1053 
1054 	sc->sc_channel[ch].reg_ccr = ccr;
1055 
1056 	sc->sc_channel[ch].need_reg_write = 1;
1057 
1058 	TI_SDMA_UNLOCK(sc);
1059 
1060 	return 0;
1061 }
1062 
1063 /**
1064  *	ti_sdma_set_addr_mode - driver attach function
1065  *	@ch: the channel number to set the endianness of
1066  *	@rd_mode: the xfer source addressing mode (either DMA_ADDR_CONSTANT,
1067  *	          DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
1068  *	          DMA_ADDR_DOUBLE_INDEX)
1069  *	@wr_mode: the xfer destination addressing mode (either DMA_ADDR_CONSTANT,
1070  *	          DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
1071  *	          DMA_ADDR_DOUBLE_INDEX)
1072  *
1073  *
1074  *	LOCKING:
1075  *	DMA registers protected by internal mutex
1076  *
1077  *	RETURNS:
1078  *	EH_HANDLED or EH_NOT_HANDLED
1079  */
1080 int
1081 ti_sdma_set_addr_mode(unsigned int ch, unsigned int src_mode,
1082                        unsigned int dst_mode)
1083 {
1084 	struct ti_sdma_softc *sc = ti_sdma_sc;
1085 	uint32_t ccr;
1086 
1087 	/* Sanity check */
1088 	if (sc == NULL)
1089 		return (ENOMEM);
1090 
1091 	TI_SDMA_LOCK(sc);
1092 
1093 	if ((sc->sc_active_channels & (1 << ch)) == 0) {
1094 		TI_SDMA_UNLOCK(sc);
1095 		return (EINVAL);
1096 	}
1097 
1098 	ccr = sc->sc_channel[ch].reg_ccr;
1099 
1100 	ccr &= ~DMA4_CCR_SRC_ADDRESS_MODE(0x3);
1101 	ccr |= DMA4_CCR_SRC_ADDRESS_MODE(src_mode);
1102 
1103 	ccr &= ~DMA4_CCR_DST_ADDRESS_MODE(0x3);
1104 	ccr |= DMA4_CCR_DST_ADDRESS_MODE(dst_mode);
1105 
1106 	sc->sc_channel[ch].reg_ccr = ccr;
1107 
1108 	sc->sc_channel[ch].need_reg_write = 1;
1109 
1110 	TI_SDMA_UNLOCK(sc);
1111 
1112 	return 0;
1113 }
1114 
1115 /**
1116  *	ti_sdma_probe - driver probe function
1117  *	@dev: dma device handle
1118  *
1119  *
1120  *
1121  *	RETURNS:
1122  *	Always returns 0.
1123  */
1124 static int
1125 ti_sdma_probe(device_t dev)
1126 {
1127 
1128 	if (!ofw_bus_status_okay(dev))
1129 		return (ENXIO);
1130 
1131 	if (!ofw_bus_is_compatible(dev, "ti,omap4430-sdma"))
1132 		return (ENXIO);
1133 
1134 	device_set_desc(dev, "TI sDMA Controller");
1135 	return (0);
1136 }
1137 
1138 /**
1139  *	ti_sdma_attach - driver attach function
1140  *	@dev: dma device handle
1141  *
1142  *	Initialises memory mapping/pointers to the DMA register set and requests
1143  *	IRQs. This is effectively the setup function for the driver.
1144  *
1145  *	RETURNS:
1146  *	0 on success or a negative error code failure.
1147  */
1148 static int
1149 ti_sdma_attach(device_t dev)
1150 {
1151 	struct ti_sdma_softc *sc = device_get_softc(dev);
1152 	unsigned int timeout;
1153 	unsigned int i;
1154 	int      rid;
1155 	void    *ihl;
1156 	int      err;
1157 
1158 	/* Setup the basics */
1159 	sc->sc_dev = dev;
1160 
1161 	/* No channels active at the moment */
1162 	sc->sc_active_channels = 0x00000000;
1163 
1164 	/* Mutex to protect the shared data structures */
1165 	TI_SDMA_LOCK_INIT(sc);
1166 
1167 	/* Get the memory resource for the register mapping */
1168 	rid = 0;
1169 	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
1170 	if (sc->sc_mem_res == NULL)
1171 		panic("%s: Cannot map registers", device_get_name(dev));
1172 
1173 	/* Enable the interface and functional clocks */
1174 	ti_sysc_clock_enable(device_get_parent(dev));
1175 
1176 	/* Read the sDMA revision register and sanity check it's known */
1177 	sc->sc_hw_rev = ti_sdma_read_4(sc,
1178 	    ti_sysc_get_rev_address_offset_host(device_get_parent(dev)));
1179 	device_printf(dev, "sDMA revision %08x\n", sc->sc_hw_rev);
1180 
1181 	if (!ti_sdma_is_omap4_rev(sc) && !ti_sdma_is_omap3_rev(sc)) {
1182 		device_printf(sc->sc_dev, "error - unknown sDMA H/W revision\n");
1183 		return (EINVAL);
1184 	}
1185 
1186 	/* Disable all interrupts */
1187 	for (i = 0; i < NUM_DMA_IRQS; i++) {
1188 		ti_sdma_write_4(sc, DMA4_IRQENABLE_L(i), 0x00000000);
1189 	}
1190 
1191 	/* Soft-reset is only supported on pre-OMAP44xx devices */
1192 	if (ti_sdma_is_omap3_rev(sc)) {
1193 		/* Soft-reset */
1194 		ti_sdma_write_4(sc, DMA4_OCP_SYSCONFIG, 0x0002);
1195 
1196 		/* Set the timeout to 100ms*/
1197 		timeout = (hz < 10) ? 1 : ((100 * hz) / 1000);
1198 
1199 		/* Wait for DMA reset to complete */
1200 		while ((ti_sdma_read_4(sc, DMA4_SYSSTATUS) & 0x1) == 0x0) {
1201 			/* Sleep for a tick */
1202 			pause("DMARESET", 1);
1203 
1204 			if (timeout-- == 0) {
1205 				device_printf(sc->sc_dev, "sDMA reset operation timed out\n");
1206 				return (EINVAL);
1207 			}
1208 		}
1209 	}
1210 
1211 	/*
1212 	 * Install interrupt handlers for the for possible interrupts. Any channel
1213 	 * can trip one of the four IRQs
1214 	 */
1215 	rid = 0;
1216 	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1217 	    RF_ACTIVE | RF_SHAREABLE);
1218 	if (sc->sc_irq_res == NULL)
1219 		panic("Unable to setup the dma irq handler.\n");
1220 
1221 	err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
1222 	    NULL, ti_sdma_intr, NULL, &ihl);
1223 	if (err)
1224 		panic("%s: Cannot register IRQ", device_get_name(dev));
1225 
1226 	/* Store the DMA structure globally ... this driver should never be unloaded */
1227 	ti_sdma_sc = sc;
1228 
1229 	return (0);
1230 }
1231 
/* Kernel device interface: only probe and attach are provided, the driver
 * is not expected to be detached/unloaded. */
static device_method_t ti_sdma_methods[] = {
	DEVMETHOD(device_probe, ti_sdma_probe),
	DEVMETHOD(device_attach, ti_sdma_attach),
	{0, 0},
};

/* Driver description: name, method table and per-instance softc size. */
static driver_t ti_sdma_driver = {
	"ti_sdma",
	ti_sdma_methods,
	sizeof(struct ti_sdma_softc),
};

/* Attach under simplebus (FDT); depends on ti_sysc, whose clock services
 * are used during attach. */
DRIVER_MODULE(ti_sdma, simplebus, ti_sdma_driver, 0, 0);
MODULE_DEPEND(ti_sdma, ti_sysc, 1, 1, 1);
1246