/*
 *
 * BRIEF MODULE DESCRIPTION
 *	A DMA channel allocator for Au1x00. API is modeled loosely off of
 *	linux/kernel/dma.c.
 *
 * Copyright 2000, 2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1000_dma.h>

/*
 * A note on resource allocation:
 *
 * All drivers needing DMA channels should allocate and release them
 * through the public routines `request_dma()' and `free_dma()'.
 *
 * To avoid problems, all processes should allocate resources in the
 * same sequence and release them in the reverse order.
 *
 * So, when allocating DMAs and IRQs, first allocate the DMA, then the IRQ.
 * When releasing them, first release the IRQ, then release the DMA. The
 * main reason for this order is that, if you are requesting the DMA buffer
 * done interrupt, you won't know the irq number until the DMA channel is
 * returned from request_dma.
 */
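/*
 * A minimal usage sketch of the ordering described above. The handler,
 * function names and EXAMPLE_DMA_ID below are placeholders for this
 * illustration, not identifiers from au1000_dma.h. Passing an irqhandler
 * to request_au1000_dma() makes it request the channel's DMA-done IRQ
 * after the channel is allocated, so the "DMA first, IRQ second" rule is
 * satisfied by a single call; free_au1000_dma() releases them in reverse.
 */
#if 0	/* example only, never compiled */
static irqreturn_t example_dma_done(int irq, void *dev_id)
{
	/* buffer-done handling for the example device would go here */
	return IRQ_HANDLED;
}

static int example_open(void)
{
	/* EXAMPLE_DMA_ID stands in for a real device id from au1000_dma.h */
	int ch = request_au1000_dma(EXAMPLE_DMA_ID, "example-dev",
				    example_dma_done, 0, NULL);
	if (ch < 0)
		return ch;		/* -EINVAL or -ENODEV */

	/* ... program buffers and start the transfer ... */

	free_au1000_dma(ch);		/* frees the IRQ, then the channel */
	return 0;
}
#endif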
/* DMA Channel register block spacing */
#define DMA_CHANNEL_LEN		0x00000100

DEFINE_SPINLOCK(au1000_dma_spin_lock);

struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = {
	{.dev_id = -1,},
	{.dev_id = -1,},
	{.dev_id = -1,},
	{.dev_id = -1,},
	{.dev_id = -1,},
	{.dev_id = -1,},
	{.dev_id = -1,},
	{.dev_id = -1,}
};
EXPORT_SYMBOL(au1000_dma_table);

/* Device FIFO addresses and default DMA modes */
static const struct dma_dev {
	unsigned int fifo_addr;
	unsigned int dma_mode;
} dma_dev_table[DMA_NUM_DEV] = {
	{ AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 },		/* UART0_TX */
	{ AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR },	/* UART0_RX */
	{ 0, 0 },						/* DMA_REQ0 */
	{ 0, 0 },						/* DMA_REQ1 */
	{ AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 },		/* AC97 TX c */
	{ AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR },	/* AC97 RX c */
	{ AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC },	/* UART3_TX */
	{ AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC },	/* EP0WR */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC },	/* EP2WR */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC },	/* EP3WR */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */
	/* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! */
	{ AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC },	/* I2S TX */
	{ AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR }, /* I2S RX */
};

int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
			 int length, int *eof, void *data)
{
	int i, len = 0;
	struct dma_chan *chan;

	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) {
		chan = get_dma_chan(i);
		if (chan != NULL)
			len += sprintf(buf + len, "%2d: %s\n",
				       i, chan->dev_str);
	}

	if (fpos >= len) {
		*start = buf;
		*eof = 1;
		return 0;
	}
	*start = buf + fpos;
	len -= fpos;
	if (len > length)
		return length;
	*eof = 1;
	return len;
}

/* Device FIFO addresses and default DMA modes - 2nd bank */
static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = {
	{ AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 },		/* coherent */
	{ AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR },	/* coherent */
	{ AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 },		/* coherent */
	{ AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }	/* coherent */
};

/*
 * Finds a free channel, and binds the requested device to it.
 * Returns the allocated channel number, or negative on error.
 * Requests the DMA done IRQ if irqhandler != NULL.
 */
int request_au1000_dma(int dev_id, const char *dev_str,
		       irq_handler_t irqhandler,
		       unsigned long irqflags,
		       void *irq_dev_id)
{
	struct dma_chan *chan;
	const struct dma_dev *dev;
	int i, ret;

	if (alchemy_get_cputype() == ALCHEMY_CPU_AU1100) {
		if (dev_id < 0 || dev_id >= (DMA_NUM_DEV + DMA_NUM_DEV_BANK2))
			return -EINVAL;
	} else {
		if (dev_id < 0 || dev_id >= DMA_NUM_DEV)
			return -EINVAL;
	}

	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
		if (au1000_dma_table[i].dev_id < 0)
			break;

	if (i == NUM_AU1000_DMA_CHANNELS)
		return -ENODEV;

	chan = &au1000_dma_table[i];

	if (dev_id >= DMA_NUM_DEV) {
		dev_id -= DMA_NUM_DEV;
		dev = &dma_dev_table_bank2[dev_id];
	} else
		dev = &dma_dev_table[dev_id];

	if (irqhandler) {
		chan->irq_dev = irq_dev_id;
		ret = request_irq(chan->irq, irqhandler, irqflags, dev_str,
				  chan->irq_dev);
		if (ret) {
			chan->irq_dev = NULL;
			return ret;
		}
	} else {
		chan->irq_dev = NULL;
	}

	/* fill it in */
	chan->io = (void __iomem *)(KSEG1ADDR(AU1000_DMA_PHYS_ADDR) +
				    i * DMA_CHANNEL_LEN);
	chan->dev_id = dev_id;
	chan->dev_str = dev_str;
	chan->fifo_addr = dev->fifo_addr;
	chan->mode = dev->dma_mode;

	/* initialize the channel before returning */
	init_dma(i);

	return i;
}
EXPORT_SYMBOL(request_au1000_dma);

void free_au1000_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan) {
		printk(KERN_ERR "Error trying to free DMA%d\n", dmanr);
		return;
	}

	disable_dma(dmanr);
	if (chan->irq_dev)
		free_irq(chan->irq, chan->irq_dev);

	chan->irq_dev = NULL;
	chan->dev_id = -1;
}
EXPORT_SYMBOL(free_au1000_dma);

static int __init au1000_dma_init(void)
{
	int base, i;

	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1000:
		base = AU1000_DMA_INT_BASE;
		break;
	case ALCHEMY_CPU_AU1500:
		base = AU1500_DMA_INT_BASE;
		break;
	case ALCHEMY_CPU_AU1100:
		base = AU1100_DMA_INT_BASE;
		break;
	default:
		goto out;
	}

	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
		au1000_dma_table[i].irq = base + i;

	printk(KERN_INFO "Alchemy DMA initialized\n");

out:
	return 0;
}
arch_initcall(au1000_dma_init);
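/*
 * A hedged sketch of the request_au1000_dma() contract described above:
 * the return value is either the allocated channel number (usable with
 * get_dma_chan(), disable_dma() and free_au1000_dma()) or a negative
 * errno (-EINVAL for a bad device id, -ENODEV when every channel is
 * busy). The struct, function names and EXAMPLE_DMA_ID below are
 * placeholders for this illustration only.
 */
#if 0	/* example only, never compiled */
struct example_priv {
	int channel;
};

static irqreturn_t example_done(int irq, void *data)
{
	struct example_priv *p = data;	/* irq_dev_id passed at request time */

	/* channel p->channel signalled its DMA buffer done interrupt */
	return IRQ_HANDLED;
}

static int example_attach(struct example_priv *p)
{
	struct dma_chan *chan;
	int ch;

	/* EXAMPLE_DMA_ID stands in for a real device id from au1000_dma.h */
	ch = request_au1000_dma(EXAMPLE_DMA_ID, "example-dev",
				example_done, 0, p);
	if (ch < 0)
		return ch;		/* -EINVAL or -ENODEV */

	p->channel = ch;
	chan = get_dma_chan(ch);	/* channel regs at chan->io,
					 * device FIFO at chan->fifo_addr */
	return 0;
}

static void example_detach(struct example_priv *p)
{
	/* disables the channel and frees the done IRQ registered above */
	free_au1000_dma(p->channel);
}
#endif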