// SPDX-License-Identifier: GPL-2.0
/*
 * Framework for userspace DMA-BUF allocations
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019 Linaro Ltd.
 */

#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/dma-heap.h>
#include <uapi/linux/dma-heap.h>

#define DEVNAME "dma_heap"

#define NUM_HEAP_MINORS 128

/**
 * struct dma_heap - represents a dmabuf heap in the system
 * @name:	used for debugging/device-node name
 * @ops:	ops struct for this heap
 * @priv:	private per-heap data, retrievable via dma_heap_get_drvdata()
 * @heap_devt:	heap device node
 * @list:	list head connecting to list of heaps
 * @heap_cdev:	heap char device
 *
 * Represents a heap of memory from which buffers can be made.
 */
struct dma_heap {
	const char *name;
	const struct dma_heap_ops *ops;
	void *priv;
	dev_t heap_devt;
	struct list_head list;
	struct cdev heap_cdev;
};

static LIST_HEAD(heap_list);
static DEFINE_MUTEX(heap_list_lock);
static dev_t dma_heap_devt;
static struct class *dma_heap_class;
static DEFINE_XARRAY_ALLOC(dma_heap_minors);

static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
				 unsigned int fd_flags,
				 unsigned int heap_flags)
{
	/*
	 * Allocations from all heaps have to begin
	 * and end on page boundaries.
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return -EINVAL;

	return heap->ops->allocate(heap, len, fd_flags, heap_flags);
}

static int dma_heap_open(struct inode *inode, struct file *file)
{
	struct dma_heap *heap;

	heap = xa_load(&dma_heap_minors, iminor(inode));
	if (!heap) {
		pr_err("dma_heap: minor %d unknown.\n", iminor(inode));
		return -ENODEV;
	}

	/* instance data as context */
	file->private_data = heap;
	nonseekable_open(inode, file);

	return 0;
}

static long dma_heap_ioctl_allocate(struct file *file, void *data)
{
	struct dma_heap_allocation_data *heap_allocation = data;
	struct dma_heap *heap = file->private_data;
	int fd;

	if (heap_allocation->fd)
		return -EINVAL;

	if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
		return -EINVAL;

	if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
		return -EINVAL;

	fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
				   heap_allocation->fd_flags,
				   heap_allocation->heap_flags);
	if (fd < 0)
		return fd;

	heap_allocation->fd = fd;

	return 0;
}

static unsigned int dma_heap_ioctl_cmds[] = {
	DMA_HEAP_IOCTL_ALLOC,
};

static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
			   unsigned long arg)
{
	char stack_kdata[128];
	char *kdata = stack_kdata;
	unsigned int kcmd;
	unsigned int in_size, out_size, drv_size, ksize;
	int nr = _IOC_NR(ucmd);
	int ret = 0;

	if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
		return -EINVAL;

	/* Get the kernel ioctl cmd that matches */
	kcmd = dma_heap_ioctl_cmds[nr];

	/* Figure out the delta between user cmd size and kernel cmd size */
	drv_size = _IOC_SIZE(kcmd);
	out_size = _IOC_SIZE(ucmd);
	in_size = out_size;
	if ((ucmd & kcmd & IOC_IN) == 0)
		in_size = 0;
	if ((ucmd & kcmd & IOC_OUT) == 0)
		out_size = 0;
	ksize = max(max(in_size, out_size), drv_size);
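
	/*
	 * Note: the size encoded in the userspace ioctl number may differ
	 * from the kernel's structure size (e.g. an older userspace built
	 * against a smaller struct). Only in_size bytes are copied in, any
	 * kernel-side remainder is zeroed below, and only out_size bytes
	 * are copied back out, so mismatched sizes stay safe in both
	 * directions.
	 */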

	/* If necessary, allocate buffer for ioctl argument */
	if (ksize > sizeof(stack_kdata)) {
		kdata = kmalloc(ksize, GFP_KERNEL);
		if (!kdata)
			return -ENOMEM;
	}

	if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
		ret = -EFAULT;
		goto err;
	}

	/* zero out any difference between the kernel/user structure size */
	if (ksize > in_size)
		memset(kdata + in_size, 0, ksize - in_size);

	switch (kcmd) {
	case DMA_HEAP_IOCTL_ALLOC:
		ret = dma_heap_ioctl_allocate(file, kdata);
		break;
	default:
		ret = -ENOTTY;
		goto err;
	}

	if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
		ret = -EFAULT;
err:
	if (kdata != stack_kdata)
		kfree(kdata);
	return ret;
}

static const struct file_operations dma_heap_fops = {
	.owner		= THIS_MODULE,
	.open		= dma_heap_open,
	.unlocked_ioctl	= dma_heap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= dma_heap_ioctl,
#endif
};

/**
 * dma_heap_get_drvdata() - get per-subdriver data for the heap
 * @heap: DMA-Heap to retrieve private data for
 *
 * Returns:
 * The per-subdriver data for the heap.
 */
void *dma_heap_get_drvdata(struct dma_heap *heap)
{
	return heap->priv;
}

/**
 * dma_heap_add() - adds a heap to dmabuf heaps
 * @exp_info: information needed to register this heap
 */
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
{
	struct dma_heap *heap, *h, *err_ret;
	struct device *dev_ret;
	unsigned int minor;
	int ret;

	if (!exp_info->name || !strcmp(exp_info->name, "")) {
		pr_err("dma_heap: Cannot add heap without a name\n");
		return ERR_PTR(-EINVAL);
	}

	if (!exp_info->ops || !exp_info->ops->allocate) {
		pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
		return ERR_PTR(-EINVAL);
	}

	/* check the name is unique */
	mutex_lock(&heap_list_lock);
	list_for_each_entry(h, &heap_list, list) {
		if (!strcmp(h->name, exp_info->name)) {
			mutex_unlock(&heap_list_lock);
			pr_err("dma_heap: Already registered heap named %s\n",
			       exp_info->name);
			return ERR_PTR(-EINVAL);
		}
	}
	mutex_unlock(&heap_list_lock);

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->name = exp_info->name;
	heap->ops = exp_info->ops;
	heap->priv = exp_info->priv;

	/* Find unused minor number */
	ret = xa_alloc(&dma_heap_minors, &minor, heap,
		       XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
	if (ret < 0) {
		pr_err("dma_heap: Unable to get minor number for heap\n");
		err_ret = ERR_PTR(ret);
		goto err0;
	}

	/* Create device */
	heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor);

	cdev_init(&heap->heap_cdev, &dma_heap_fops);
	ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
	if (ret < 0) {
		pr_err("dma_heap: Unable to add char device\n");
		err_ret = ERR_PTR(ret);
		goto err1;
	}

	dev_ret = device_create(dma_heap_class,
				NULL,
				heap->heap_devt,
				NULL,
				heap->name);
	if (IS_ERR(dev_ret)) {
		pr_err("dma_heap: Unable to create device\n");
		err_ret = ERR_CAST(dev_ret);
		goto err2;
	}
	/* Add heap to the list */
	mutex_lock(&heap_list_lock);
	list_add(&heap->list, &heap_list);
	mutex_unlock(&heap_list_lock);

	return heap;

err2:
	cdev_del(&heap->heap_cdev);
err1:
	xa_erase(&dma_heap_minors, minor);
err0:
	kfree(heap);
	return err_ret;
}

static char *dma_heap_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
}

static int dma_heap_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
	if (ret)
		return ret;

	dma_heap_class = class_create(THIS_MODULE, DEVNAME);
	if (IS_ERR(dma_heap_class)) {
		unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
		return PTR_ERR(dma_heap_class);
	}
	dma_heap_class->devnode = dma_heap_devnode;

	return 0;
}
subsys_initcall(dma_heap_init);
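
/*
 * Example: a minimal sketch of how a heap subdriver might register itself
 * with the framework above. The "example" name, the stub allocate op, and
 * its argument types are illustrative assumptions (the authoritative
 * struct dma_heap_ops definition lives in <linux/dma-heap.h>); a real
 * heap would export its backing memory as a dma-buf and return an fd.
 */
static int example_heap_allocate(struct dma_heap *heap, unsigned long len,
				 unsigned long fd_flags,
				 unsigned long heap_flags)
{
	/*
	 * A real implementation would allocate len bytes of backing
	 * memory, wrap it in a dma-buf, and return a file descriptor
	 * honoring fd_flags. This stub just reports failure.
	 */
	return -ENOMEM;
}

static const struct dma_heap_ops example_heap_ops = {
	.allocate = example_heap_allocate,
};

static int __init example_heap_create(void)
{
	struct dma_heap_export_info exp_info;
	struct dma_heap *heap;

	/* The fields below mirror what dma_heap_add() reads above. */
	exp_info.name = "example";
	exp_info.ops = &example_heap_ops;
	exp_info.priv = NULL;

	heap = dma_heap_add(&exp_info);
	return PTR_ERR_OR_ZERO(heap);
}
module_init(example_heap_create); /* would need <linux/module.h> in a real driver */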
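
/*
 * Example (userspace, built as a separate program): a minimal sketch of
 * allocating a buffer through the ioctl interface above. The heap name
 * "example" is an assumption; struct dma_heap_allocation_data and
 * DMA_HEAP_IOCTL_ALLOC come from the <linux/dma-heap.h> uapi header.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int alloc_from_heap(unsigned long len)
{
	struct dma_heap_allocation_data data;
	int heap_fd, ret;

	heap_fd = open("/dev/dma_heap/example", O_RDONLY | O_CLOEXEC);
	if (heap_fd < 0)
		return -1;

	memset(&data, 0, sizeof(data));		/* data.fd must be zero on entry */
	data.len = len;				/* page-aligned by the kernel */
	data.fd_flags = O_RDWR | O_CLOEXEC;	/* flags for the returned dmabuf fd */
	data.heap_flags = 0;			/* no heap flags are defined here */

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);
	if (ret < 0)
		return -1;

	return data.fd;	/* dma-buf fd: mmap() it or pass it to drivers */
}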