/*
 * arch/sh/drivers/dma/dma-sh.c
 *
 * SuperH On-chip DMAC Support
 *
 * Copyright (C) 2000 Takashi YOSHII
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2005 Andriy Skulysh
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/io.h>
#include <mach-dreamcast/mach/dma.h>
#include <asm/dma.h>
#include <asm/dma-register.h>
#include <cpu/dma-register.h>
#include <cpu/dma.h>

/*
 * Define the default configuration for dual-address memory-to-memory
 * transfer. The 0x400 value represents auto-request, external->external.
 */
#define RS_DUAL	(DM_INC | SM_INC | RS_AUTO | TS_INDEX2VAL(XMIT_SZ_32BIT))
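
/*
 * Other transfer widths compose the same way. A minimal sketch, assuming
 * the usual XMIT_SZ_* indices from <cpu/dma-register.h> (hypothetical
 * macro, not used in this file):
 *
 *	#define RS_DUAL_8BIT	(DM_INC | SM_INC | RS_AUTO | \
 *				 TS_INDEX2VAL(XMIT_SZ_8BIT))
 */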

static unsigned long dma_find_base(unsigned int chan)
{
	unsigned long base = SH_DMAC_BASE0;

#ifdef SH_DMAC_BASE1
	if (chan >= 6)
		base = SH_DMAC_BASE1;
#endif

	return base;
}

static unsigned long dma_base_addr(unsigned int chan)
{
	unsigned long base = dma_find_base(chan);

	/* Normalize the channel index within its DMAC block */
	chan = (chan % 6) * 0x10;

	/* DMAOR is placed inside the channel register space; step over it */
	if (chan >= 0x40)
		chan += 0x10;

	return base + chan;
}
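
/*
 * Worked example of the resulting map (a sketch, assuming the usual
 * 0x10-byte SAR/DAR/TCR/CHCR block per channel, with DMAOR occupying
 * base + 0x40):
 *
 *	dma_base_addr(0)  == SH_DMAC_BASE0 + 0x00
 *	dma_base_addr(3)  == SH_DMAC_BASE0 + 0x30
 *	dma_base_addr(4)  == SH_DMAC_BASE0 + 0x50	(steps over DMAOR)
 *	dma_base_addr(6)  == SH_DMAC_BASE1 + 0x00	(where BASE1 exists)
 *	dma_base_addr(10) == SH_DMAC_BASE1 + 0x50
 */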

#ifdef CONFIG_SH_DMA_IRQ_MULTI
static inline unsigned int get_dmte_irq(unsigned int chan)
{
	return chan >= 6 ? DMTE6_IRQ : DMTE0_IRQ;
}
#else

static unsigned int dmte_irq_map[] = {
	DMTE0_IRQ, DMTE0_IRQ + 1, DMTE0_IRQ + 2, DMTE0_IRQ + 3,

#ifdef DMTE4_IRQ
	DMTE4_IRQ, DMTE4_IRQ + 1,
#endif

#ifdef DMTE6_IRQ
	DMTE6_IRQ, DMTE6_IRQ + 1,
#endif

#ifdef DMTE8_IRQ
	DMTE8_IRQ, DMTE9_IRQ, DMTE10_IRQ, DMTE11_IRQ,
#endif
};

static inline unsigned int get_dmte_irq(unsigned int chan)
{
	return dmte_irq_map[chan];
}
#endif

/*
 * We determine the correct shift size based on the CHCR transmit size
 * for the given channel, since we know that the transfer will take:
 *
 *	info->count >> ts_shift[transmit_size]
 *
 * iterations to complete.
 */
static unsigned int ts_shift[] = TS_SHIFT;

static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
{
	u32 chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);

	return ts_shift[cnt];
}
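
/*
 * Worked example (a sketch, assuming TS_SHIFT maps XMIT_SZ_32BIT to a
 * shift of 2, i.e. log2(4 bytes)): a 64-byte transfer in 32-bit units
 * programs TCR = 64 >> 2 = 16 transfer iterations.
 */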

/*
 * The transfer end interrupt must read the CHCR register in order to
 * clear the hardware interrupt's active condition.
 * Beyond that, it needs to wake any waiting process, which should then
 * set up the next transfer.
 */
static irqreturn_t dma_tei(int irq, void *dev_id)
{
	struct dma_channel *chan = dev_id;
	u32 chcr;

	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);

	if (!(chcr & CHCR_TE))
		return IRQ_NONE;

	chcr &= ~(CHCR_IE | CHCR_DE);
	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));

	wake_up(&chan->wait_queue);

	return IRQ_HANDLED;
}
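
/*
 * A waiter would typically block until the channel's residue drops to
 * zero; a minimal sketch of a hypothetical caller (using the generic
 * dma_channel wait queue that dma_tei() above wakes):
 *
 *	wait_event(chan->wait_queue,
 *		   sh_dmac_get_dma_residue(chan) == 0);
 */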

static int sh_dmac_request_dma(struct dma_channel *chan)
{
	if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
		return 0;

	return request_irq(get_dmte_irq(chan->chan), dma_tei, IRQF_SHARED,
			   chan->dev_id, chan);
}

static void sh_dmac_free_dma(struct dma_channel *chan)
{
	free_irq(get_dmte_irq(chan->chan), chan);
}

static int
sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
{
	if (!chcr)
		chcr = RS_DUAL | CHCR_IE;

	if (chcr & CHCR_IE) {
		chcr &= ~CHCR_IE;
		chan->flags |= DMA_TEI_CAPABLE;
	} else {
		chan->flags &= ~DMA_TEI_CAPABLE;
	}

	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));

	chan->flags |= DMA_CONFIGURED;
	return 0;
}

static void sh_dmac_enable_dma(struct dma_channel *chan)
{
	int irq;
	u32 chcr;

	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
	chcr |= CHCR_DE;

	if (chan->flags & DMA_TEI_CAPABLE)
		chcr |= CHCR_IE;

	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));

	if (chan->flags & DMA_TEI_CAPABLE) {
		irq = get_dmte_irq(chan->chan);
		enable_irq(irq);
	}
}

static void sh_dmac_disable_dma(struct dma_channel *chan)
{
	int irq;
	u32 chcr;

	if (chan->flags & DMA_TEI_CAPABLE) {
		irq = get_dmte_irq(chan->chan);
		disable_irq(irq);
	}

	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
}

static int sh_dmac_xfer_dma(struct dma_channel *chan)
{
	/*
	 * If we haven't pre-configured the channel with special flags, use
	 * the defaults.
	 */
	if (unlikely(!(chan->flags & DMA_CONFIGURED)))
		sh_dmac_configure_channel(chan, 0);

	sh_dmac_disable_dma(chan);

	/*
	 * Single-address mode usage note!
	 *
	 * It's important that we don't accidentally write any value to SAR/DAR
	 * (this includes 0) that hasn't been directly specified by the user if
	 * we're in single-address mode.
	 *
	 * In this case, only one address can be defined; anything else will
	 * result in a DMA address error interrupt (at least on the SH-4),
	 * which will subsequently halt the transfer.
	 *
	 * Channel 2 on the Dreamcast is a special case, as this is used for
	 * cascading to the PVR2 DMAC. In this case, we still need to write
	 * SAR and DAR, regardless of value, in order for cascading to work.
	 */
	if (chan->sar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		__raw_writel(chan->sar, (dma_base_addr(chan->chan) + SAR));
	if (chan->dar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		__raw_writel(chan->dar, (dma_base_addr(chan->chan) + DAR));

	__raw_writel(chan->count >> calc_xmit_shift(chan),
		(dma_base_addr(chan->chan) + TCR));

	sh_dmac_enable_dma(chan);

	return 0;
}
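
/*
 * A minimal end-to-end sketch of driving a transfer through these ops
 * (hypothetical caller; src, dst and len are assumed to be addresses and
 * a byte count visible to the DMAC):
 *
 *	chan->sar   = src;
 *	chan->dar   = dst;
 *	chan->count = len;
 *	sh_dmac_configure_channel(chan, RS_DUAL | CHCR_IE);
 *	sh_dmac_xfer_dma(chan);
 *
 * ... then block until completion as in the wait sketch following
 * dma_tei() above.
 */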

static int sh_dmac_get_dma_residue(struct dma_channel *chan)
{
	if (!(__raw_readl(dma_base_addr(chan->chan) + CHCR) & CHCR_DE))
		return 0;

	return __raw_readl(dma_base_addr(chan->chan) + TCR)
		 << calc_xmit_shift(chan);
}

/*
 * DMAOR handling
 */
#if defined(CONFIG_CPU_SUBTYPE_SH7723)	|| \
    defined(CONFIG_CPU_SUBTYPE_SH7724)	|| \
    defined(CONFIG_CPU_SUBTYPE_SH7780)	|| \
    defined(CONFIG_CPU_SUBTYPE_SH7785)
#define NR_DMAOR	2
#else
#define NR_DMAOR	1
#endif

/*
 * DMAOR bases are broken out amongst channel groups. DMAOR0 manages
 * channels 0 - 5; DMAOR1 manages channels 6 - 11 (optional).
 */
#define dmaor_read_reg(n)		__raw_readw(dma_find_base((n)*6))
#define dmaor_write_reg(n, data)	__raw_writew(data, dma_find_base((n)*6))

static inline int dmaor_reset(int no)
{
	unsigned long dmaor = dmaor_read_reg(no);

	/* Try to clear the error flags first, in case they are set */
	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
	dmaor_write_reg(no, dmaor);

	dmaor |= DMAOR_INIT;
	dmaor_write_reg(no, dmaor);

	/* See if we got an error again */
	if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) {
		printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * DMAE handling
 */
#ifdef CONFIG_CPU_SH4

#if defined(DMAE1_IRQ)
#define NR_DMAE		2
#else
#define NR_DMAE		1
#endif

static const char *dmae_name[] = {
	"DMAC Address Error0",
	"DMAC Address Error1"
};

#ifdef CONFIG_SH_DMA_IRQ_MULTI
static inline unsigned int get_dma_error_irq(int n)
{
	return get_dmte_irq(n * 6);
}
#else

static unsigned int dmae_irq_map[] = {
	DMAE0_IRQ,

#ifdef DMAE1_IRQ
	DMAE1_IRQ,
#endif
};

static inline unsigned int get_dma_error_irq(int n)
{
	return dmae_irq_map[n];
}
#endif

static irqreturn_t dma_err(int irq, void *dummy)
{
	int i;

	for (i = 0; i < NR_DMAOR; i++)
		dmaor_reset(i);

	/* Use the _nosync variant; we're inside this IRQ's own handler */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

static int dmae_irq_init(void)
{
	int n;

	for (n = 0; n < NR_DMAE; n++) {
		int i = request_irq(get_dma_error_irq(n), dma_err,
				    IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]);
		if (unlikely(i < 0)) {
			printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
			return i;
		}
	}

	return 0;
}

static void dmae_irq_free(void)
{
	int n;

	/* dev_id must match what was passed to request_irq() above */
	for (n = 0; n < NR_DMAE; n++)
		free_irq(get_dma_error_irq(n), (void *)dmae_name[n]);
}
#else
static inline int dmae_irq_init(void)
{
	return 0;
}

static void dmae_irq_free(void)
{
}
#endif

static struct dma_ops sh_dmac_ops = {
	.request	= sh_dmac_request_dma,
	.free		= sh_dmac_free_dma,
	.get_residue	= sh_dmac_get_dma_residue,
	.xfer		= sh_dmac_xfer_dma,
	.configure	= sh_dmac_configure_channel,
};

static struct dma_info sh_dmac_info = {
	.name		= "sh_dmac",
	.nr_channels	= CONFIG_NR_ONCHIP_DMA_CHANNELS,
	.ops		= &sh_dmac_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};

static int __init sh_dmac_init(void)
{
	struct dma_info *info = &sh_dmac_info;
	int i, rc;

	/*
	 * Initialize DMAE, for parts that support it.
	 */
	rc = dmae_irq_init();
	if (unlikely(rc != 0))
		return rc;

	/*
	 * Initialize DMAOR, and clean up any error flags that may have
	 * been set.
	 */
	for (i = 0; i < NR_DMAOR; i++) {
		rc = dmaor_reset(i);
		if (unlikely(rc != 0))
			return rc;
	}

	return register_dmac(info);
}

static void __exit sh_dmac_exit(void)
{
	dmae_irq_free();
	unregister_dmac(&sh_dmac_info);
}

subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL");