// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

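/*
 * Per-buffer state for an exported udmabuf: one folio/offset pair per
 * PAGE_SIZE page of the buffer, the folio pins taken at creation time
 * (kept only for later unpinning), a lazily created sg_table for CPU
 * access, and the misc device the buffer was created through.
 */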
struct udmabuf {
        pgoff_t pagecount;
        struct folio **folios;

        /**
         * Unlike folios, pinned_folios is only used for unpinning.
         * Therefore, nr_pinned is not the same as pagecount: pinned_folios
         * records each folio that was pinned during udmabuf_create().
         * Note that, since a folio may be pinned multiple times, it can be
         * added to pinned_folios multiple times, once for each time it was
         * pinned during creation.
         */
        pgoff_t nr_pinned;
        struct folio **pinned_folios;

        struct sg_table *sg;
        struct miscdevice *device;
        pgoff_t *offsets;
};

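/*
 * Page-fault handler for mmap()ed udmabufs: insert the PFN backing the
 * faulting page, then opportunistically pre-fault the rest of the VMA.
 */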
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct udmabuf *ubuf = vma->vm_private_data;
        pgoff_t pgoff = vmf->pgoff;
        unsigned long addr, pfn;
        vm_fault_t ret;

        if (pgoff >= ubuf->pagecount)
                return VM_FAULT_SIGBUS;

        pfn = folio_pfn(ubuf->folios[pgoff]);
        pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;

        ret = vmf_insert_pfn(vma, vmf->address, pfn);
        if (ret & VM_FAULT_ERROR)
                return ret;

        /* pre fault */
        pgoff = vma->vm_pgoff;
        addr = vma->vm_start;

        for (; addr < vma->vm_end; pgoff++, addr += PAGE_SIZE) {
                if (addr == vmf->address)
                        continue;

                if (WARN_ON(pgoff >= ubuf->pagecount))
                        break;

                pfn = folio_pfn(ubuf->folios[pgoff]);
                pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;

                /**
                 * If the below vmf_insert_pfn() fails, we do not return an
                 * error here during this pre-fault step. However, an error
                 * will be returned if the failure occurs when the address
                 * is actually accessed.
                 */
                if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
                        break;
        }

        return ret;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
        .fault = udmabuf_vm_fault,
};

static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
        struct udmabuf *ubuf = buf->priv;

        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;

        vma->vm_ops = &udmabuf_vm_ops;
        vma->vm_private_data = ubuf;
        vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        return 0;
}

static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
        struct udmabuf *ubuf = buf->priv;
        unsigned long *pfns;
        void *vaddr;
        pgoff_t pg;

        dma_resv_assert_held(buf->resv);

        /**
         * HVO (HugeTLB Vmemmap Optimization) may free tail pages, so just
         * use the pfn to map each folio into the vmalloc area.
         */
        pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
        if (!pfns)
                return -ENOMEM;

        for (pg = 0; pg < ubuf->pagecount; pg++) {
                unsigned long pfn = folio_pfn(ubuf->folios[pg]);

                pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
                pfns[pg] = pfn;
        }

        vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
        kvfree(pfns);
        if (!vaddr)
                return -EINVAL;

        iosys_map_set_vaddr(map, vaddr);
        return 0;
}

static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
        struct udmabuf *ubuf = buf->priv;

        dma_resv_assert_held(buf->resv);

        vm_unmap_ram(map->vaddr, ubuf->pagecount);
}

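/*
 * Build an sg_table with one PAGE_SIZE entry per page of the buffer and
 * DMA-map it for the given device; released again via put_sg_table().
 */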
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
                                     enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct sg_table *sg;
        struct scatterlist *sgl;
        unsigned int i = 0;
        int ret;

        sg = kzalloc(sizeof(*sg), GFP_KERNEL);
        if (!sg)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table(sg, ubuf->pagecount, GFP_KERNEL);
        if (ret < 0)
                goto err_alloc;

        for_each_sg(sg->sgl, sgl, ubuf->pagecount, i)
                sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,
                             ubuf->offsets[i]);

        ret = dma_map_sgtable(dev, sg, direction, 0);
        if (ret < 0)
                goto err_map;
        return sg;

err_map:
        sg_free_table(sg);
err_alloc:
        kfree(sg);
        return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
                         enum dma_data_direction direction)
{
        dma_unmap_sgtable(dev, sg, direction, 0);
        sg_free_table(sg);
        kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
                                    enum dma_data_direction direction)
{
        return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
                          struct sg_table *sg,
                          enum dma_data_direction direction)
{
        return put_sg_table(at->dev, sg, direction);
}

static void unpin_all_folios(struct udmabuf *ubuf)
{
        pgoff_t i;

        for (i = 0; i < ubuf->nr_pinned; ++i)
                unpin_folio(ubuf->pinned_folios[i]);

        kvfree(ubuf->pinned_folios);
}

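/*
 * Allocate the per-page bookkeeping arrays. On partial failure the caller
 * is expected to clean up through deinit_udmabuf(), which tolerates the
 * NULL pointers left behind here via kvfree().
 */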
static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
{
        ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);
        if (!ubuf->folios)
                return -ENOMEM;

        ubuf->offsets = kvcalloc(pgcnt, sizeof(*ubuf->offsets), GFP_KERNEL);
        if (!ubuf->offsets)
                return -ENOMEM;

        ubuf->pinned_folios = kvmalloc_array(pgcnt,
                                             sizeof(*ubuf->pinned_folios),
                                             GFP_KERNEL);
        if (!ubuf->pinned_folios)
                return -ENOMEM;

        return 0;
}

static __always_inline void deinit_udmabuf(struct udmabuf *ubuf)
{
        unpin_all_folios(ubuf);
        kvfree(ubuf->offsets);
        kvfree(ubuf->folios);
}

static void release_udmabuf(struct dma_buf *buf)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;

        if (ubuf->sg)
                put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

        deinit_udmabuf(ubuf);
        kfree(ubuf);
}

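/*
 * For CPU access the buffer is mapped for the udmabuf misc device itself;
 * the sg_table is created lazily on first access and cached in ubuf->sg.
 */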
static int begin_cpu_udmabuf(struct dma_buf *buf,
                             enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;
        int ret = 0;

        if (!ubuf->sg) {
                ubuf->sg = get_sg_table(dev, buf, direction);
                if (IS_ERR(ubuf->sg)) {
                        ret = PTR_ERR(ubuf->sg);
                        ubuf->sg = NULL;
                }
        } else {
                dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction);
        }

        return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
                           enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;

        if (!ubuf->sg)
                return -EINVAL;

        dma_sync_sgtable_for_device(dev, ubuf->sg, direction);
        return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
        .map_dma_buf = map_udmabuf,
        .unmap_dma_buf = unmap_udmabuf,
        .release = release_udmabuf,
        .mmap = mmap_udmabuf,
        .vmap = vmap_udmabuf,
        .vunmap = vunmap_udmabuf,
        .begin_cpu_access = begin_cpu_udmabuf,
        .end_cpu_access = end_cpu_udmabuf,
};

#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE)

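/*
 * Only shmem and hugetlbfs memfds are accepted as backing files. The memfd
 * must be sealed against shrinking (F_SEAL_SHRINK) and must not carry
 * F_SEAL_WRITE or F_SEAL_FUTURE_WRITE.
 */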
static int check_memfd_seals(struct file *memfd)
{
        int seals;

        if (!shmem_file(memfd) && !is_file_hugepages(memfd))
                return -EBADFD;

        seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
        if (seals == -EINVAL)
                return -EBADFD;

        if ((seals & SEALS_WANTED) != SEALS_WANTED ||
            (seals & SEALS_DENIED) != 0)
                return -EINVAL;

        return 0;
}

static struct dma_buf *export_udmabuf(struct udmabuf *ubuf,
                                      struct miscdevice *device)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        ubuf->device = device;
        exp_info.ops = &udmabuf_ops;
        exp_info.size = ubuf->pagecount << PAGE_SHIFT;
        exp_info.priv = ubuf;
        exp_info.flags = O_RDWR;

        return dma_buf_export(&exp_info);
}

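/*
 * Pin the folios backing [start, start + size) of the memfd and append one
 * folio/offset entry per PAGE_SIZE page to ubuf->folios and ubuf->offsets.
 * Each folio returned by memfd_pin_folios() is also recorded in
 * ubuf->pinned_folios so it can be unpinned later.
 */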
static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
                               loff_t start, loff_t size, struct folio **folios)
{
        pgoff_t nr_pinned = ubuf->nr_pinned;
        pgoff_t upgcnt = ubuf->pagecount;
        u32 cur_folio, cur_pgcnt;
        pgoff_t pgoff, pgcnt;
        long nr_folios;
        loff_t end;

        pgcnt = size >> PAGE_SHIFT;
        end = start + (pgcnt << PAGE_SHIFT) - 1;
        nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff);
        if (nr_folios <= 0)
                return nr_folios ? nr_folios : -EINVAL;

        cur_pgcnt = 0;
        for (cur_folio = 0; cur_folio < nr_folios; ++cur_folio) {
                pgoff_t subpgoff = pgoff;
                size_t fsize = folio_size(folios[cur_folio]);

                ubuf->pinned_folios[nr_pinned++] = folios[cur_folio];

                for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
                        ubuf->folios[upgcnt] = folios[cur_folio];
                        ubuf->offsets[upgcnt] = subpgoff;
                        ++upgcnt;

                        if (++cur_pgcnt >= pgcnt)
                                goto end;
                }

                /**
                 * In a given range, only the first subpage of the first folio
                 * has an offset, which is returned by memfd_pin_folios().
                 * The first subpages of the other folios in the range have an
                 * offset of 0.
                 */
                pgoff = 0;
        }
end:
        ubuf->pagecount = upgcnt;
        ubuf->nr_pinned = nr_pinned;
        return 0;
}

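/*
 * Validate the request (page-aligned offsets and sizes, total size within
 * size_limit_mb), pin the folios of every memfd range in the list, and
 * export the result as a dmabuf fd.
 */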
static long udmabuf_create(struct miscdevice *device,
                           struct udmabuf_create_list *head,
                           struct udmabuf_create_item *list)
{
        unsigned long max_nr_folios = 0;
        struct folio **folios = NULL;
        pgoff_t pgcnt = 0, pglimit;
        struct udmabuf *ubuf;
        struct dma_buf *dmabuf;
        long ret = -EINVAL;
        u32 i, flags;

        ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
        if (!ubuf)
                return -ENOMEM;

        pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
        for (i = 0; i < head->count; i++) {
                pgoff_t subpgcnt;

                if (!PAGE_ALIGNED(list[i].offset))
                        goto err_noinit;
                if (!PAGE_ALIGNED(list[i].size))
                        goto err_noinit;

                subpgcnt = list[i].size >> PAGE_SHIFT;
                pgcnt += subpgcnt;
                if (pgcnt > pglimit)
                        goto err_noinit;

                max_nr_folios = max_t(unsigned long, subpgcnt, max_nr_folios);
        }

        if (!pgcnt)
                goto err_noinit;

        ret = init_udmabuf(ubuf, pgcnt);
        if (ret)
                goto err;

        folios = kvmalloc_array(max_nr_folios, sizeof(*folios), GFP_KERNEL);
        if (!folios) {
                ret = -ENOMEM;
                goto err;
        }

        for (i = 0; i < head->count; i++) {
                struct file *memfd = fget(list[i].memfd);

                if (!memfd) {
                        ret = -EBADFD;
                        goto err;
                }

                /*
                 * Take the inode lock to protect against concurrent
                 * memfd_add_seals(), which takes this lock in write mode.
                 */
                inode_lock_shared(file_inode(memfd));
                ret = check_memfd_seals(memfd);
                if (ret)
                        goto out_unlock;

                ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset,
                                         list[i].size, folios);
out_unlock:
                inode_unlock_shared(file_inode(memfd));
                fput(memfd);
                if (ret)
                        goto err;
        }

        flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
        dmabuf = export_udmabuf(ubuf, device);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto err;
        }
        /*
         * Ownership of ubuf is held by the dmabuf from here.
         * If the following dma_buf_fd() fails, dma_buf_put() cleans up both
         * the dmabuf and the ubuf (through udmabuf_ops.release).
         */

        ret = dma_buf_fd(dmabuf, flags);
        if (ret < 0)
                dma_buf_put(dmabuf);

        kvfree(folios);
        return ret;

err:
        deinit_udmabuf(ubuf);
err_noinit:
        kfree(ubuf);
        kvfree(folios);
        return ret;
}

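/*
 * UDMABUF_CREATE is the single-range form of UDMABUF_CREATE_LIST. An
 * illustrative (untested) userspace sketch, assuming a page-aligned size:
 *
 *	int devfd = open("/dev/udmabuf", O_RDWR);
 *	int memfd = memfd_create("buf", MFD_ALLOW_SEALING);
 *
 *	ftruncate(memfd, size);
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);	// required by check_memfd_seals()
 *
 *	struct udmabuf_create create = {
 *		.memfd  = memfd,
 *		.flags  = UDMABUF_FLAGS_CLOEXEC,
 *		.offset = 0,
 *		.size   = size,
 *	};
 *	int buffd = ioctl(devfd, UDMABUF_CREATE, &create);
 */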
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
        struct udmabuf_create create;
        struct udmabuf_create_list head;
        struct udmabuf_create_item list;

        if (copy_from_user(&create, (void __user *)arg,
                           sizeof(create)))
                return -EFAULT;

        head.flags = create.flags;
        head.count = 1;
        list.memfd = create.memfd;
        list.offset = create.offset;
        list.size = create.size;

        return udmabuf_create(filp->private_data, &head, &list);
}

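/*
 * For UDMABUF_CREATE_LIST, userspace passes a struct udmabuf_create_list
 * header immediately followed by head.count struct udmabuf_create_item
 * entries; head.count is bounded by the list_limit module parameter.
 */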
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
        struct udmabuf_create_list head;
        struct udmabuf_create_item *list;
        int ret = -EINVAL;
        u32 lsize;

        if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
                return -EFAULT;
        if (head.count > list_limit)
                return -EINVAL;
        lsize = sizeof(struct udmabuf_create_item) * head.count;
        list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
        if (IS_ERR(list))
                return PTR_ERR(list);

        ret = udmabuf_create(filp->private_data, &head, list);
        kfree(list);
        return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
                          unsigned long arg)
{
        long ret;

        switch (ioctl) {
        case UDMABUF_CREATE:
                ret = udmabuf_ioctl_create(filp, arg);
                break;
        case UDMABUF_CREATE_LIST:
                ret = udmabuf_ioctl_create_list(filp, arg);
                break;
        default:
                ret = -ENOTTY;
                break;
        }
        return ret;
}

static const struct file_operations udmabuf_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "udmabuf",
        .fops = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
        int ret;

        ret = misc_register(&udmabuf_misc);
        if (ret < 0) {
                pr_err("Could not initialize udmabuf device\n");
                return ret;
        }

        ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
                                           DMA_BIT_MASK(64));
        if (ret < 0) {
                pr_err("Could not setup DMA mask for udmabuf device\n");
                misc_deregister(&udmabuf_misc);
                return ret;
        }

        return 0;
}

static void __exit udmabuf_dev_exit(void)
{
        misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");