1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/arch/arm/mach-rpc/dma.c
4 *
5 * Copyright (C) 1998 Russell King
6 *
7 * DMA functions specific to RiscPC architecture
8 */
9 #include <linux/mman.h>
10 #include <linux/init.h>
11 #include <linux/interrupt.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/io.h>
14
15 #include <asm/page.h>
16 #include <asm/dma.h>
17 #include <asm/fiq.h>
18 #include <asm/irq.h>
19 #include <mach/hardware.h>
20 #include <linux/uaccess.h>
21
22 #include <asm/mach/dma.h>
23 #include <asm/hardware/iomd.h>
24
/*
 * Per-channel state for one IOMD DMA channel.  The controller has two
 * buffer register sets (A and B) that the interrupt handler refills
 * alternately; cur_* hold the values last programmed into CURx/ENDx,
 * dma_* track the unconsumed remainder of the current scatterlist entry.
 */
struct iomd_dma {
	struct dma_struct dma;
	void __iomem *base;	/* Controller base address */
	int irq;		/* Controller IRQ */
	unsigned int state;	/* last buffer-select state (DMA_ST_AB bits) */
	dma_addr_t cur_addr;	/* bus address programmed into CURA/CURB */
	unsigned int cur_len;	/* length value (with DMA_END_* flags ORed in) */
	dma_addr_t dma_addr;	/* next unprogrammed address in current sg */
	unsigned int dma_len;	/* bytes remaining in current sg entry */
};
35
#if 0
/*
 * Compiled out: candidate transfer-width encoding (values look like
 * bytes per transfer).  Kept for reference only; TRANSFER_SIZE below is
 * what the driver actually uses.
 */
typedef enum {
	dma_size_8 = 1,
	dma_size_16 = 2,
	dma_size_32 = 4,
	dma_size_128 = 16
} dma_size_t;
#endif
44
/* Transfer unit used in the ENDA/ENDB length encoding below. */
#define TRANSFER_SIZE	2

/*
 * Channel register offsets, expressed relative to the channel's CURA
 * register so the same code can drive any I/O or sound DMA channel via
 * its per-channel base pointer.
 */
#define CURA	(0)
#define ENDA	(IOMD_IO0ENDA - IOMD_IO0CURA)
#define CURB	(IOMD_IO0CURB - IOMD_IO0CURA)
#define ENDB	(IOMD_IO0ENDB - IOMD_IO0CURA)
#define CR	(IOMD_IO0CR - IOMD_IO0CURA)
#define ST	(IOMD_IO0ST - IOMD_IO0CURA)
53
/*
 * Compute the next (cur_addr, cur_len) pair to program into the
 * controller from the current scatterlist position.
 *
 * Transfers are split at page boundaries.  DMA_END_L is ORed into
 * cur_len on the final TRANSFER_SIZE-sized piece of a page, and
 * DMA_END_S once the whole scatterlist is exhausted.  With no sg left,
 * a dummy zero-length descriptor carrying both END flags is produced.
 * NOTE(review): cur_len = end - TRANSFER_SIZE encodes the length the
 * way the IOMD ENDx register expects — confirm against the datasheet.
 */
static void iomd_get_next_sg(struct iomd_dma *idma)
{
	unsigned long end, offset, flags = 0;

	if (idma->dma.sg) {
		idma->cur_addr = idma->dma_addr;
		offset = idma->cur_addr & ~PAGE_MASK;

		end = offset + idma->dma_len;

		/* Never cross a page boundary in one programmed chunk. */
		if (end > PAGE_SIZE)
			end = PAGE_SIZE;

		if (offset + TRANSFER_SIZE >= end)
			flags |= DMA_END_L;

		idma->cur_len = end - TRANSFER_SIZE;

		/* Consume this chunk from the current sg entry. */
		idma->dma_len -= end - offset;
		idma->dma_addr += end - offset;

		if (idma->dma_len == 0) {
			if (idma->dma.sgcount > 1) {
				idma->dma.sg = sg_next(idma->dma.sg);
				idma->dma_addr = idma->dma.sg->dma_address;
				idma->dma_len = idma->dma.sg->length;
				idma->dma.sgcount--;
			} else {
				/* Last entry done: flag end of stream. */
				idma->dma.sg = NULL;
				flags |= DMA_END_S;
			}
		}
	} else {
		/* Nothing queued: dummy terminator descriptor. */
		flags = DMA_END_S | DMA_END_L;
		idma->cur_addr = 0;
		idma->cur_len = 0;
	}

	idma->cur_len |= flags;
}
94
/*
 * DMA channel interrupt handler.  While the controller reports an
 * interrupt pending, refill whichever buffer register set (A or B) it
 * has finished with from the scatterlist; stop once the list is
 * exhausted and the hardware has overflowed onto the terminator.
 */
static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
{
	struct iomd_dma *idma = dev_id;
	void __iomem *base = idma->base;
	unsigned int state = idma->state;
	unsigned int status, cur, end;

	do {
		status = readb(base + ST);
		if (!(status & DMA_ST_INT))
			goto out;

		/* Active buffer flipped since last refill: advance sg. */
		if ((state ^ status) & DMA_ST_AB)
			iomd_get_next_sg(idma);

		// This efficiently implements state = OFL != AB ? AB : 0
		state = ((status >> 2) ^ status) & DMA_ST_AB;
		if (state) {
			cur = CURA;
			end = ENDA;
		} else {
			cur = CURB;
			end = ENDB;
		}
		writel(idma->cur_addr, base + cur);
		writel(idma->cur_len, base + end);

		/*
		 * Overflow with only the terminator descriptor queued
		 * means the whole transfer is complete.
		 */
		if (status & DMA_ST_OFL &&
		    idma->cur_len == (DMA_END_S|DMA_END_L))
			break;
	} while (1);

	/* Transfer finished: mark idle and mask until re-enabled. */
	state = ~DMA_ST_AB;
	disable_irq_nosync(irq);
out:
	idma->state = state;
	return IRQ_HANDLED;
}
133
/*
 * Claim the channel's interrupt line when the DMA channel is requested.
 * Returns 0 on success or the request_irq() error code.
 */
static int iomd_request_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma;

	idma = container_of(dma, struct iomd_dma, dma);
	return request_irq(idma->irq, iomd_dma_handle, 0,
			   idma->dma.device_id, idma);
}
141
/* Release the interrupt line claimed by iomd_request_dma(). */
static void iomd_free_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma;

	idma = container_of(dma, struct iomd_dma, dma);
	free_irq(idma->irq, idma);
}
148
/*
 * Dummy device used as the dma_map_single() target in
 * iomd_enable_dma() for ISA-style drivers that hand us a flat virtual
 * buffer instead of a pre-mapped scatterlist.
 */
static struct device isa_dma_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ~(dma_addr_t)0,
	.dma_mask = &isa_dma_dev.coherent_dma_mask,
};
154
/*
 * Start (or resume) a transfer on an IOMD channel.  On a fresh buffer
 * ("invalid") the scatterlist position is reset — mapping a flat ISA
 * buffer on the fly if no sg was supplied — and the controller is
 * cleared; then the channel is enabled and its IRQ unmasked so the
 * handler programs the buffer registers.
 */
static void iomd_enable_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
	void __iomem *base = idma->base;
	unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;

	if (idma->dma.invalid) {
		idma->dma.invalid = 0;

		/*
		 * Cope with ISA-style drivers which expect cache
		 * coherence.
		 */
		if (!idma->dma.sg) {
			idma->dma.sg = &idma->dma.buf;
			idma->dma.sgcount = 1;
			idma->dma.buf.length = idma->dma.count;
			idma->dma.buf.dma_address = dma_map_single(&isa_dma_dev,
				idma->dma.addr, idma->dma.count,
				idma->dma.dma_mode == DMA_MODE_READ ?
				DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}

		idma->dma_addr = idma->dma.sg->dma_address;
		idma->dma_len = idma->dma.sg->length;

		/* DMA_CR_C presumably resets the controller — confirm. */
		writeb(DMA_CR_C, base + CR);
		idma->state = DMA_ST_AB;
	}

	if (idma->dma.dma_mode == DMA_MODE_READ)
		ctrl |= DMA_CR_D;

	writeb(ctrl, base + CR);
	enable_irq(idma->irq);
}
191
/*
 * Stop the channel: mask its IRQ (unless the handler already did so on
 * completion — state == ~DMA_ST_AB marks the channel idle) and clear
 * the channel control register.
 */
static void iomd_disable_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
	void __iomem *base = idma->base;
	unsigned long flags;

	/* Keep the state test and disable_irq() atomic w.r.t. the handler. */
	local_irq_save(flags);
	if (idma->state != ~DMA_ST_AB)
		disable_irq(idma->irq);
	writeb(0, base + CR);
	local_irq_restore(flags);
}
204
/*
 * Set the transfer speed for channels 0-3.  @cycle is presumably the
 * requested cycle time in nanoseconds — TODO confirm; it is mapped to
 * a 2-bit speed code occupying two bits per channel in IOMD_DMATCR.
 * Channels outside DMA_0..DMA_3 leave the register unchanged.
 * Returns the speed code selected.
 */
static int iomd_set_dma_speed(unsigned int chan, dma_t *dma, int cycle)
{
	int tcr, speed;

	/* NOTE(review): thresholds mix < and <= (188/250/438) — verify. */
	if (cycle < 188)
		speed = 3;
	else if (cycle <= 250)
		speed = 2;
	else if (cycle < 438)
		speed = 1;
	else
		speed = 0;

	tcr = iomd_readb(IOMD_DMATCR);
	speed &= 3;

	switch (chan) {
	case DMA_0:
		tcr = (tcr & ~0x03) | speed;
		break;

	case DMA_1:
		tcr = (tcr & ~0x0c) | (speed << 2);
		break;

	case DMA_2:
		tcr = (tcr & ~0x30) | (speed << 4);
		break;

	case DMA_3:
		tcr = (tcr & ~0xc0) | (speed << 6);
		break;

	default:
		break;
	}

	iomd_writeb(tcr, IOMD_DMATCR);

	return speed;
}
246
/* Operations for the real IOMD hardware DMA channels. */
static struct dma_ops iomd_dma_ops = {
	.type		= "IOMD",
	.request	= iomd_request_dma,
	.free		= iomd_free_dma,
	.enable		= iomd_enable_dma,
	.disable	= iomd_disable_dma,
	.setspeed	= iomd_set_dma_speed,
};
255
/* FIQ ownership handle for the floppy pseudo-DMA transfer code. */
static struct fiq_handler fh = {
	.name = "floppydma"
};

/* Floppy pseudo-DMA channel: data is moved by a FIQ handler, not IOMD. */
struct floppy_dma {
	struct dma_struct dma;
	unsigned int fiq;	/* FIQ number to enable/disable */
};
264
/*
 * Start a floppy pseudo-DMA transfer.  There is no hardware DMA channel
 * for the floppy: a FIQ handler moves the data.  Select and install the
 * handler for the transfer direction, prime the FIQ registers with the
 * byte count (r9), buffer address (r10) and controller base (fp), then
 * enable the FIQ.  If the FIQ cannot be claimed the transfer is
 * abandoned with a log message.
 *
 * Fixes: "set_fiq_regs(&regs)" had been mangled to "set_fiq_regs(®s)"
 * (HTML-entity corruption), which does not compile.
 */
static void floppy_enable_dma(unsigned int chan, dma_t *dma)
{
	struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
	void *fiqhandler_start;
	unsigned int fiqhandler_length;
	struct pt_regs regs;

	/* Scatterlists are not supported by the FIQ transfer code. */
	BUG_ON(fdma->dma.sg);

	if (fdma->dma.dma_mode == DMA_MODE_READ) {
		extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
		fiqhandler_start = &floppy_fiqin_start;
		fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
	} else {
		extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
		fiqhandler_start = &floppy_fiqout_start;
		fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
	}

	/* Register layout expected by the FIQ handler assembly. */
	regs.ARM_r9 = fdma->dma.count;
	regs.ARM_r10 = (unsigned long)fdma->dma.addr;
	regs.ARM_fp = (unsigned long)FLOPPYDMA_BASE;

	if (claim_fiq(&fh)) {
		printk("floppydma: couldn't claim FIQ.\n");
		return;
	}

	set_fiq_handler(fiqhandler_start, fiqhandler_length);
	set_fiq_regs(&regs);
	enable_fiq(fdma->fiq);
}
298
/* Tear down a floppy pseudo-DMA transfer: stop the FIQ and release it. */
static void floppy_disable_dma(unsigned int chan, dma_t *dma)
{
	struct floppy_dma *floppy;

	floppy = container_of(dma, struct floppy_dma, dma);
	disable_fiq(floppy->fiq);
	release_fiq(&fh);
}
305
/*
 * Report bytes remaining in the current floppy transfer.  The FIQ
 * handler keeps its running count in r9 (see floppy_enable_dma()), so
 * read it back from the saved FIQ register set.
 *
 * Fixes: "get_fiq_regs(&regs)" had been mangled to "get_fiq_regs(®s)"
 * (HTML-entity corruption), which does not compile.
 */
static int floppy_get_residue(unsigned int chan, dma_t *dma)
{
	struct pt_regs regs;

	get_fiq_regs(&regs);
	return regs.ARM_r9;
}
312
/* Operations for the FIQ-driven floppy pseudo-DMA channel. */
static struct dma_ops floppy_dma_ops = {
	.type		= "FIQDMA",
	.enable		= floppy_enable_dma,
	.disable	= floppy_disable_dma,
	.residue	= floppy_get_residue,
};
319
/*
 * This is virtual DMA - we don't need anything here.
 */
static void sound_enable_disable_dma(unsigned int chan, dma_t *dma)
{
}

/* Operations for the virtual sound DMA channel (intentional no-ops). */
static struct dma_ops sound_dma_ops = {
	.type		= "VIRTUAL",
	.enable		= sound_enable_disable_dma,
	.disable	= sound_enable_disable_dma,
};
332
/* Channel state for DMA_0..DMA_3 and the two sound channels (DMA_S0/S1). */
static struct iomd_dma iomd_dma[6];

/* The single floppy pseudo-DMA channel, driven by FIQ_FLOPPYDATA. */
static struct floppy_dma floppy_dma = {
	.dma = {
		.d_ops = &floppy_dma_ops,
	},
	.fiq = FIQ_FLOPPYDATA,
};

/* Virtual sound DMA channel; enable/disable are no-ops. */
static dma_t sound_dma = {
	.d_ops = &sound_dma_ops,
};
345
/*
 * Boot-time init: quiesce the four I/O DMA channels, program default
 * timing, route channels 2/3 externally, fill in the per-channel
 * base/IRQ table, and register all channels (IOMD, floppy, sound) with
 * the ISA DMA layer.  Registration failures are logged, not fatal.
 */
static int __init rpc_dma_init(void)
{
	unsigned int i;
	int ret;

	/* Disable all four I/O DMA channel control registers. */
	iomd_writeb(0, IOMD_IO0CR);
	iomd_writeb(0, IOMD_IO1CR);
	iomd_writeb(0, IOMD_IO2CR);
	iomd_writeb(0, IOMD_IO3CR);

	/* Default transfer timing; 0xa0 encoding per IOMD datasheet. */
	iomd_writeb(0xa0, IOMD_DMATCR);

	/*
	 * Setup DMA channels 2,3 to be for podules
	 * and channels 0,1 for internal devices
	 */
	iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);

	iomd_dma[DMA_0].base	= IOMD_BASE + IOMD_IO0CURA;
	iomd_dma[DMA_0].irq	= IRQ_DMA0;
	iomd_dma[DMA_1].base	= IOMD_BASE + IOMD_IO1CURA;
	iomd_dma[DMA_1].irq	= IRQ_DMA1;
	iomd_dma[DMA_2].base	= IOMD_BASE + IOMD_IO2CURA;
	iomd_dma[DMA_2].irq	= IRQ_DMA2;
	iomd_dma[DMA_3].base	= IOMD_BASE + IOMD_IO3CURA;
	iomd_dma[DMA_3].irq	= IRQ_DMA3;
	iomd_dma[DMA_S0].base	= IOMD_BASE + IOMD_SD0CURA;
	iomd_dma[DMA_S0].irq	= IRQ_DMAS0;
	iomd_dma[DMA_S1].base	= IOMD_BASE + IOMD_SD1CURA;
	iomd_dma[DMA_S1].irq	= IRQ_DMAS1;

	/* Register every hardware channel with the ISA DMA core. */
	for (i = DMA_0; i <= DMA_S1; i++) {
		iomd_dma[i].dma.d_ops = &iomd_dma_ops;

		ret = isa_dma_add(i, &iomd_dma[i].dma);
		if (ret)
			printk("IOMDDMA%u: unable to register: %d\n", i, ret);
	}

	ret = isa_dma_add(DMA_VIRTUAL_FLOPPY, &floppy_dma.dma);
	if (ret)
		printk("IOMDFLOPPY: unable to register: %d\n", ret);
	ret = isa_dma_add(DMA_VIRTUAL_SOUND, &sound_dma);
	if (ret)
		printk("IOMDSOUND: unable to register: %d\n", ret);
	return 0;
}
core_initcall(rpc_dma_init);
394