xref: /linux/arch/sh/drivers/dma/dma-sh.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /*
2  * arch/sh/drivers/dma/dma-sh.c
3  *
4  * SuperH On-chip DMAC Support
5  *
6  * Copyright (C) 2000 Takashi YOSHII
7  * Copyright (C) 2003, 2004 Paul Mundt
8  * Copyright (C) 2005 Andriy Skulysh
9  *
10  * This file is subject to the terms and conditions of the GNU General Public
11  * License.  See the file "COPYING" in the main directory of this archive
12  * for more details.
13  */
14 
15 #include <linux/config.h>
16 #include <linux/init.h>
17 #include <linux/irq.h>
18 #include <linux/interrupt.h>
19 #include <linux/module.h>
20 #include <asm/dreamcast/dma.h>
21 #include <asm/signal.h>
22 #include <asm/irq.h>
23 #include <asm/dma.h>
24 #include <asm/io.h>
25 #include "dma-sh.h"
26 
27 static inline unsigned int get_dmte_irq(unsigned int chan)
28 {
29 	unsigned int irq = 0;
30 
31 	/*
32 	 * Normally we could just do DMTE0_IRQ + chan outright, though in the
33 	 * case of the 7751R, the DMTE IRQs for channels > 4 start right above
34 	 * the SCIF
35 	 */
36 	if (chan < 4) {
37 		irq = DMTE0_IRQ + chan;
38 	} else {
39 #ifdef DMTE4_IRQ
40 		irq = DMTE4_IRQ + chan - 4;
41 #endif
42 	}
43 
44 	return irq;
45 }
46 
47 /*
48  * We determine the correct shift size based off of the CHCR transmit size
49  * for the given channel. Since we know that it will take:
50  *
51  *	info->count >> ts_shift[transmit_size]
52  *
53  * iterations to complete the transfer.
54  */
55 static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
56 {
57 	u32 chcr = ctrl_inl(CHCR[chan->chan]);
58 
59 	return ts_shift[(chcr & CHCR_TS_MASK)>>CHCR_TS_SHIFT];
60 }
61 
62 /*
63  * The transfer end interrupt must read the chcr register to end the
64  * hardware interrupt active condition.
65  * Besides that it needs to waken any waiting process, which should handle
66  * setting up the next transfer.
67  */
68 static irqreturn_t dma_tei(int irq, void *dev_id, struct pt_regs *regs)
69 {
70 	struct dma_channel *chan = (struct dma_channel *)dev_id;
71 	u32 chcr;
72 
73 	chcr = ctrl_inl(CHCR[chan->chan]);
74 
75 	if (!(chcr & CHCR_TE))
76 		return IRQ_NONE;
77 
78 	chcr &= ~(CHCR_IE | CHCR_DE);
79 	ctrl_outl(chcr, CHCR[chan->chan]);
80 
81 	wake_up(&chan->wait_queue);
82 
83 	return IRQ_HANDLED;
84 }
85 
86 static int sh_dmac_request_dma(struct dma_channel *chan)
87 {
88 	char name[32];
89 
90 	snprintf(name, sizeof(name), "DMAC Transfer End (Channel %d)",
91 		 chan->chan);
92 
93 	return request_irq(get_dmte_irq(chan->chan), dma_tei,
94 			   SA_INTERRUPT, name, chan);
95 }
96 
97 static void sh_dmac_free_dma(struct dma_channel *chan)
98 {
99 	free_irq(get_dmte_irq(chan->chan), chan);
100 }
101 
102 static void
103 sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
104 {
105 	if (!chcr)
106 		chcr = RS_DUAL | CHCR_IE;
107 
108 	if (chcr & CHCR_IE) {
109 		chcr &= ~CHCR_IE;
110 		chan->flags |= DMA_TEI_CAPABLE;
111 	} else {
112 		chan->flags &= ~DMA_TEI_CAPABLE;
113 	}
114 
115 	ctrl_outl(chcr, CHCR[chan->chan]);
116 
117 	chan->flags |= DMA_CONFIGURED;
118 }
119 
120 static void sh_dmac_enable_dma(struct dma_channel *chan)
121 {
122 	int irq;
123 	u32 chcr;
124 
125 	chcr = ctrl_inl(CHCR[chan->chan]);
126 	chcr |= CHCR_DE;
127 
128 	if (chan->flags & DMA_TEI_CAPABLE)
129 		chcr |= CHCR_IE;
130 
131 	ctrl_outl(chcr, CHCR[chan->chan]);
132 
133 	if (chan->flags & DMA_TEI_CAPABLE) {
134 		irq = get_dmte_irq(chan->chan);
135 		enable_irq(irq);
136 	}
137 }
138 
139 static void sh_dmac_disable_dma(struct dma_channel *chan)
140 {
141 	int irq;
142 	u32 chcr;
143 
144 	if (chan->flags & DMA_TEI_CAPABLE) {
145 		irq = get_dmte_irq(chan->chan);
146 		disable_irq(irq);
147 	}
148 
149 	chcr = ctrl_inl(CHCR[chan->chan]);
150 	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
151 	ctrl_outl(chcr, CHCR[chan->chan]);
152 }
153 
154 static int sh_dmac_xfer_dma(struct dma_channel *chan)
155 {
156 	/*
157 	 * If we haven't pre-configured the channel with special flags, use
158 	 * the defaults.
159 	 */
160 	if (unlikely(!(chan->flags & DMA_CONFIGURED)))
161 		sh_dmac_configure_channel(chan, 0);
162 
163 	sh_dmac_disable_dma(chan);
164 
165 	/*
166 	 * Single-address mode usage note!
167 	 *
168 	 * It's important that we don't accidentally write any value to SAR/DAR
169 	 * (this includes 0) that hasn't been directly specified by the user if
170 	 * we're in single-address mode.
171 	 *
172 	 * In this case, only one address can be defined, anything else will
173 	 * result in a DMA address error interrupt (at least on the SH-4),
174 	 * which will subsequently halt the transfer.
175 	 *
176 	 * Channel 2 on the Dreamcast is a special case, as this is used for
177 	 * cascading to the PVR2 DMAC. In this case, we still need to write
178 	 * SAR and DAR, regardless of value, in order for cascading to work.
179 	 */
180 	if (chan->sar || (mach_is_dreamcast() &&
181 			  chan->chan == PVR2_CASCADE_CHAN))
182 		ctrl_outl(chan->sar, SAR[chan->chan]);
183 	if (chan->dar || (mach_is_dreamcast() &&
184 			  chan->chan == PVR2_CASCADE_CHAN))
185 		ctrl_outl(chan->dar, DAR[chan->chan]);
186 
187 	ctrl_outl(chan->count >> calc_xmit_shift(chan), DMATCR[chan->chan]);
188 
189 	sh_dmac_enable_dma(chan);
190 
191 	return 0;
192 }
193 
194 static int sh_dmac_get_dma_residue(struct dma_channel *chan)
195 {
196 	if (!(ctrl_inl(CHCR[chan->chan]) & CHCR_DE))
197 		return 0;
198 
199 	return ctrl_inl(DMATCR[chan->chan]) << calc_xmit_shift(chan);
200 }
201 
/*
 * DMAOR is accessed 16 bits wide on the SH7780 and 32 bits wide on
 * the other supported parts; hide the width difference behind a
 * common pair of accessors.
 */
#ifdef CONFIG_CPU_SUBTYPE_SH7780
#define dmaor_read_reg()	ctrl_inw(DMAOR)
#define dmaor_write_reg(data)	ctrl_outw(data, DMAOR)
#else
#define dmaor_read_reg()	ctrl_inl(DMAOR)
#define dmaor_write_reg(data)	ctrl_outl(data, DMAOR)
#endif
210 static inline int dmaor_reset(void)
211 {
212 	unsigned long dmaor = dmaor_read_reg();
213 
214 	/* Try to clear the error flags first, incase they are set */
215 	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
216 	dmaor_write_reg(dmaor);
217 
218 	dmaor |= DMAOR_INIT;
219 	dmaor_write_reg(dmaor);
220 
221 	/* See if we got an error again */
222 	if ((dmaor_read_reg() & (DMAOR_AE | DMAOR_NMIF))) {
223 		printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
224 		return -EINVAL;
225 	}
226 
227 	return 0;
228 }
229 
#if defined(CONFIG_CPU_SH4)
/*
 * DMAC address-error interrupt: reset the controller to clear the
 * latched error condition, and mask the error IRQ so it doesn't
 * storm until things are straightened out.
 */
static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
{
	dmaor_reset();
	disable_irq(irq);
	return IRQ_HANDLED;
}
#endif
239 
240 static struct dma_ops sh_dmac_ops = {
241 	.request	= sh_dmac_request_dma,
242 	.free		= sh_dmac_free_dma,
243 	.get_residue	= sh_dmac_get_dma_residue,
244 	.xfer		= sh_dmac_xfer_dma,
245 	.configure	= sh_dmac_configure_channel,
246 };
247 
248 static struct dma_info sh_dmac_info = {
249 	.name		= "sh_dmac",
250 	.nr_channels	= CONFIG_NR_ONCHIP_DMA_CHANNELS,
251 	.ops		= &sh_dmac_ops,
252 	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
253 };
254 
255 static int __init sh_dmac_init(void)
256 {
257 	struct dma_info *info = &sh_dmac_info;
258 	int i;
259 
260 #ifdef CONFIG_CPU_SH4
261 	make_ipr_irq(DMAE_IRQ, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
262 	i = request_irq(DMAE_IRQ, dma_err, SA_INTERRUPT, "DMAC Address Error", 0);
263 	if (i < 0)
264 		return i;
265 #endif
266 
267 	for (i = 0; i < info->nr_channels; i++) {
268 		int irq = get_dmte_irq(i);
269 
270 		make_ipr_irq(irq, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
271 	}
272 
273 	/*
274 	 * Initialize DMAOR, and clean up any error flags that may have
275 	 * been set.
276 	 */
277 	i = dmaor_reset();
278 	if (i < 0)
279 		return i;
280 
281 	return register_dmac(info);
282 }
283 
284 static void __exit sh_dmac_exit(void)
285 {
286 #ifdef CONFIG_CPU_SH4
287 	free_irq(DMAE_IRQ, 0);
288 #endif
289 	unregister_dmac(&sh_dmac_info);
290 }
291 
/*
 * Registered at subsys_initcall time so the DMAC is up before the
 * device-driver initcalls that may want to request channels.
 */
subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL");
298