// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
 *
 * Written by Hennus Bergman, 1992.
 *
 * 1994/12/26: Changes by Alex Nash to fix a minor bug in /proc/dma.
 *   In the previous version the reported device could end up being wrong,
 *   if a device requested a DMA channel that was already in use.
 *   [It also happened to remove the sizeof(char *) == sizeof(int)
 *   assumption introduced because of those /proc/dma patches. -- Hennus]
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <asm/dma.h>


/* A note on resource allocation:
 *
 * All drivers needing DMA channels should allocate and release them
 * through the public routines `request_dma()' and `free_dma()'.
 *
 * In order to avoid problems, all processes should allocate resources in
 * the same sequence and release them in the reverse order.
 *
 * So, when allocating DMAs and IRQs, first allocate the IRQ, then the DMA.
 * When releasing them, first release the DMA, then release the IRQ.
 * If you don't, you may cause allocation requests to fail unnecessarily.
 * This doesn't really matter now, but it will once we get real semaphores
 * in the kernel.
 *
 * An illustrative usage sketch follows the allocator below.
 */


DEFINE_SPINLOCK(dma_spin_lock);

/*
 * If our port doesn't define this it has no PC-like DMA.
 */

#ifdef MAX_DMA_CHANNELS


/* Channel n is busy iff dma_chan_busy[n].lock != 0.
 * DMA0 used to be reserved for DRAM refresh, but apparently not any more...
 * DMA4 is reserved for cascading.
 */

struct dma_chan {
	int lock;
	const char *device_id;
};

static struct dma_chan dma_chan_busy[MAX_DMA_CHANNELS] = {
	[4] = { 1, "cascade" },
};


/**
 * request_dma - request and reserve a system DMA channel
 * @dmanr: DMA channel number
 * @device_id: reserving device ID string, used in /proc/dma
 */
int request_dma(unsigned int dmanr, const char *device_id)
{
	if (dmanr >= MAX_DMA_CHANNELS)
		return -EINVAL;

	if (xchg(&dma_chan_busy[dmanr].lock, 1) != 0)
		return -EBUSY;

	dma_chan_busy[dmanr].device_id = device_id;

	/* old flag was 0, now contains 1 to indicate busy */
	return 0;
} /* request_dma */

/**
 * free_dma - free a reserved system DMA channel
 * @dmanr: DMA channel number
 */
void free_dma(unsigned int dmanr)
{
	if (dmanr >= MAX_DMA_CHANNELS) {
		printk(KERN_WARNING "Trying to free DMA%u\n", dmanr);
		return;
	}

	if (xchg(&dma_chan_busy[dmanr].lock, 0) == 0) {
		printk(KERN_WARNING "Trying to free free DMA%u\n", dmanr);
		return;
	}

} /* free_dma */

#else

int request_dma(unsigned int dmanr, const char *device_id)
{
	return -EINVAL;
}

void free_dma(unsigned int dmanr)
{
}

#endif
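/*
 * Illustrative usage sketch (not part of this allocator; my_dev_open,
 * my_dev_release, my_dev_interrupt, MY_DEV_IRQ and MY_DEV_DMA are
 * hypothetical names): a driver that needs both an IRQ and an ISA DMA
 * channel should, per the note above, grab the IRQ first and the DMA
 * channel second, and release them in the reverse order.
 *
 *	static int my_dev_open(struct my_dev *dev)
 *	{
 *		int err;
 *
 *		err = request_irq(MY_DEV_IRQ, my_dev_interrupt, 0,
 *				  "my_dev", dev);
 *		if (err)
 *			return err;
 *
 *		err = request_dma(MY_DEV_DMA, "my_dev");
 *		if (err) {
 *			free_irq(MY_DEV_IRQ, dev);
 *			return err;
 *		}
 *
 *		return 0;
 *	}
 *
 *	static void my_dev_release(struct my_dev *dev)
 *	{
 *		free_dma(MY_DEV_DMA);
 *		free_irq(MY_DEV_IRQ, dev);
 *	}
 */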
#ifdef CONFIG_PROC_FS

#ifdef MAX_DMA_CHANNELS
static int proc_dma_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		if (dma_chan_busy[i].lock) {
			seq_printf(m, "%2d: %s\n", i,
				   dma_chan_busy[i].device_id);
		}
	}
	return 0;
}
#else
static int proc_dma_show(struct seq_file *m, void *v)
{
	seq_puts(m, "No DMA\n");
	return 0;
}
#endif /* MAX_DMA_CHANNELS */

static int proc_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_dma_show, NULL);
}

static const struct file_operations proc_dma_operations = {
	.open		= proc_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init proc_dma_init(void)
{
	proc_create("dma", 0, NULL, &proc_dma_operations);
	return 0;
}

__initcall(proc_dma_init);
#endif

EXPORT_SYMBOL(request_dma);
EXPORT_SYMBOL(free_dma);
EXPORT_SYMBOL(dma_spin_lock);
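/*
 * For reference, reading /proc/dma lists the busy channels as
 * "<channel>: <device_id>", one per line, using the device_id string
 * passed to request_dma().  A hypothetical example (the floppy entry is
 * illustrative only; "cascade" comes from the static initializer above):
 *
 *	$ cat /proc/dma
 *	 2: floppy
 *	 4: cascade
 *
 * On ports that do not define MAX_DMA_CHANNELS the file reads "No DMA".
 */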