// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

struct udmabuf {
	pgoff_t pagecount;
	struct folio **folios;
	/*
	 * Unlike folios, pinned_folios is only used for unpinning.
	 * Hence, nr_pinned is not the same as pagecount: pinned_folios
	 * records each folio that was pinned during udmabuf_create().
	 * Note that, since a folio may be pinned multiple times, it can
	 * appear in pinned_folios multiple times, once for each pin
	 * taken at creation time.
	 */
	pgoff_t nr_pinned;
	struct folio **pinned_folios;

	struct sg_table *sg;
	struct miscdevice *device;
	pgoff_t *offsets;
};

static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;
	unsigned long addr, pfn;
	vm_fault_t ret;

	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;

	pfn = folio_pfn(ubuf->folios[pgoff]);
	pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;

	ret = vmf_insert_pfn(vma, vmf->address, pfn);
	if (ret & VM_FAULT_ERROR)
		return ret;

	/* pre-fault the rest of the mapping */
	pgoff = vma->vm_pgoff;
	addr = vma->vm_start;

	for (; addr < vma->vm_end; pgoff++, addr += PAGE_SIZE) {
		if (addr == vmf->address)
			continue;

		if (WARN_ON(pgoff >= ubuf->pagecount))
			break;

		pfn = folio_pfn(ubuf->folios[pgoff]);
		pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;

		/*
		 * If the vmf_insert_pfn() below fails, no error is
		 * returned from this pre-fault step; the error will be
		 * returned later, if the failure recurs when the address
		 * is actually accessed.
		 */
		if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
			break;
	}

	return ret;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	return 0;
}
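
/*
 * Example: a minimal userspace sketch (not part of this driver) of
 * mapping a udmabuf-exported dma-buf with mmap(2). MAP_SHARED is
 * required, since mmap_udmabuf() rejects private mappings; page
 * accesses are then served by udmabuf_vm_fault() above. "udmabuf_fd"
 * and "buf_size" are hypothetical names for the fd returned by the
 * UDMABUF_CREATE ioctl (see below) and the size it was created with.
 *
 *	#include <sys/mman.h>
 *
 *	unsigned char *p = mmap(NULL, buf_size, PROT_READ | PROT_WRITE,
 *				MAP_SHARED, udmabuf_fd, 0);
 *	if (p == MAP_FAILED)
 *		return -1;
 *	p[0] = 0x42;	// faults in via udmabuf_vm_ops
 */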

static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;
	struct page **pages;
	void *vaddr;
	pgoff_t pg;

	dma_resv_assert_held(buf->resv);

	pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (pg = 0; pg < ubuf->pagecount; pg++)
		pages[pg] = folio_page(ubuf->folios[pg],
				       ubuf->offsets[pg] >> PAGE_SHIFT);

	vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
	kvfree(pages);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);
	return 0;
}

static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;

	dma_resv_assert_held(buf->resv);

	vm_unmap_ram(map->vaddr, ubuf->pagecount);
}

static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	struct scatterlist *sgl;
	unsigned int i = 0;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(sg, ubuf->pagecount, GFP_KERNEL);
	if (ret < 0)
		goto err_alloc;

	for_each_sg(sg->sgl, sgl, ubuf->pagecount, i)
		sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,
			     ubuf->offsets[i]);

	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err_map;
	return sg;

err_map:
	sg_free_table(sg);
err_alloc:
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	return put_sg_table(at->dev, sg, direction);
}

static void unpin_all_folios(struct udmabuf *ubuf)
{
	pgoff_t i;

	for (i = 0; i < ubuf->nr_pinned; ++i)
		unpin_folio(ubuf->pinned_folios[i]);

	kvfree(ubuf->pinned_folios);
}

static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
{
	ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);
	if (!ubuf->folios)
		return -ENOMEM;

	ubuf->offsets = kvcalloc(pgcnt, sizeof(*ubuf->offsets), GFP_KERNEL);
	if (!ubuf->offsets)
		return -ENOMEM;

	ubuf->pinned_folios = kvmalloc_array(pgcnt,
					     sizeof(*ubuf->pinned_folios),
					     GFP_KERNEL);
	if (!ubuf->pinned_folios)
		return -ENOMEM;

	return 0;
}

static __always_inline void deinit_udmabuf(struct udmabuf *ubuf)
{
	unpin_all_folios(ubuf);
	kvfree(ubuf->offsets);
	kvfree(ubuf->folios);
}

static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	deinit_udmabuf(ubuf);
	kfree(ubuf);
}

static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}
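
/*
 * Example: begin_cpu_udmabuf()/end_cpu_udmabuf() are reached from
 * userspace through the generic DMA_BUF_IOCTL_SYNC ioctl. A minimal
 * sketch bracketing a CPU write ("udmabuf_fd" as in the mmap sketch
 * above):
 *
 *	#include <linux/dma-buf.h>
 *	#include <sys/ioctl.h>
 *
 *	struct dma_buf_sync sync;
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *	ioctl(udmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	// ... CPU writes to the mapping ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *	ioctl(udmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */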

static const struct dma_buf_ops udmabuf_ops = {
	.map_dma_buf	   = map_udmabuf,
	.unmap_dma_buf	   = unmap_udmabuf,
	.release	   = release_udmabuf,
	.mmap		   = mmap_udmabuf,
	.vmap		   = vmap_udmabuf,
	.vunmap		   = vunmap_udmabuf,
	.begin_cpu_access  = begin_cpu_udmabuf,
	.end_cpu_access    = end_cpu_udmabuf,
};

#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE)

static int check_memfd_seals(struct file *memfd)
{
	int seals;

	if (!shmem_file(memfd) && !is_file_hugepages(memfd))
		return -EBADFD;

	seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
	if (seals == -EINVAL)
		return -EBADFD;

	if ((seals & SEALS_WANTED) != SEALS_WANTED ||
	    (seals & SEALS_DENIED) != 0)
		return -EINVAL;

	return 0;
}
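
/*
 * Example: to pass check_memfd_seals(), a memfd needs F_SEAL_SHRINK
 * set and must not have F_SEAL_WRITE or F_SEAL_FUTURE_WRITE. A minimal
 * userspace sketch creating such a memfd (error handling elided;
 * "buf_size" is a hypothetical page-aligned size):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
 *
 *	ftruncate(memfd, buf_size);
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 */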

static struct dma_buf *export_udmabuf(struct udmabuf *ubuf,
				      struct miscdevice *device)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	ubuf->device = device;
	exp_info.ops  = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	return dma_buf_export(&exp_info);
}

static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
			       loff_t start, loff_t size, struct folio **folios)
{
	pgoff_t nr_pinned = ubuf->nr_pinned;
	pgoff_t upgcnt = ubuf->pagecount;
	u32 cur_folio, cur_pgcnt;
	pgoff_t pgoff, pgcnt;
	long nr_folios;
	loff_t end;

	pgcnt = size >> PAGE_SHIFT;
	end = start + (pgcnt << PAGE_SHIFT) - 1;
	nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff);
	if (nr_folios <= 0)
		return nr_folios ? nr_folios : -EINVAL;

	cur_pgcnt = 0;
	for (cur_folio = 0; cur_folio < nr_folios; ++cur_folio) {
		pgoff_t subpgoff = pgoff;
		size_t fsize = folio_size(folios[cur_folio]);

		ubuf->pinned_folios[nr_pinned++] = folios[cur_folio];

		for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
			ubuf->folios[upgcnt] = folios[cur_folio];
			ubuf->offsets[upgcnt] = subpgoff;
			++upgcnt;

			if (++cur_pgcnt >= pgcnt)
				goto end;
		}

		/*
		 * Within a given range, only the first subpage of the
		 * first folio has a non-zero offset, which is returned
		 * by memfd_pin_folios(). The first subpages of all
		 * other folios in the range have an offset of 0.
		 */
		pgoff = 0;
	}
end:
	ubuf->pagecount = upgcnt;
	ubuf->nr_pinned = nr_pinned;
	return 0;
}
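
/*
 * Worked example with hypothetical numbers, assuming 4 KiB pages:
 * pinning 4 pages (16 KiB) starting at byte offset 2M-8K of a hugetlb
 * memfd backed by 2 MiB folios A and B. memfd_pin_folios() returns
 * nr_folios = 2 and pgoff = 2M-8K, and the loops above fill in:
 *
 *	folios[0] = A, offsets[0] = 2M-8K
 *	folios[1] = A, offsets[1] = 2M-4K
 *	folios[2] = B, offsets[2] = 0
 *	folios[3] = B, offsets[3] = 4K
 */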

static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	unsigned long max_nr_folios = 0;
	struct folio **folios = NULL;
	pgoff_t pgcnt = 0, pglimit;
	struct udmabuf *ubuf;
	struct dma_buf *dmabuf;
	long ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		pgoff_t subpgcnt;

		if (!PAGE_ALIGNED(list[i].offset))
			goto err_noinit;
		if (!PAGE_ALIGNED(list[i].size))
			goto err_noinit;

		subpgcnt = list[i].size >> PAGE_SHIFT;
		pgcnt += subpgcnt;
		if (pgcnt > pglimit)
			goto err_noinit;

		max_nr_folios = max_t(unsigned long, subpgcnt, max_nr_folios);
	}

	if (!pgcnt)
		goto err_noinit;

	ret = init_udmabuf(ubuf, pgcnt);
	if (ret)
		goto err;

	folios = kvmalloc_array(max_nr_folios, sizeof(*folios), GFP_KERNEL);
	if (!folios) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < head->count; i++) {
		struct file *memfd = fget(list[i].memfd);

		if (!memfd) {
			ret = -EBADFD;
			goto err;
		}

		/*
		 * Take the inode lock to protect against concurrent
		 * memfd_add_seals(), which takes this lock in write mode.
		 */
		inode_lock_shared(file_inode(memfd));
		ret = check_memfd_seals(memfd);
		if (ret)
			goto out_unlock;

		ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset,
					 list[i].size, folios);
out_unlock:
		inode_unlock_shared(file_inode(memfd));
		fput(memfd);
		if (ret)
			goto err;
	}

	flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
	dmabuf = export_udmabuf(ubuf, device);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto err;
	}
	/*
	 * Ownership of ubuf is held by the dmabuf from here.
	 * If the following dma_buf_fd() fails, dma_buf_put() cleans up both the
	 * dmabuf and the ubuf (through udmabuf_ops.release).
	 */

	ret = dma_buf_fd(dmabuf, flags);
	if (ret < 0)
		dma_buf_put(dmabuf);

	kvfree(folios);
	return ret;

err:
	deinit_udmabuf(ubuf);
err_noinit:
	kfree(ubuf);
	kvfree(folios);
	return ret;
}

static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags  = create.flags;
	head.count  = 1;
	list.memfd  = create.memfd;
	list.offset = create.offset;
	list.size   = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}
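
/*
 * Example: a minimal userspace sketch of the single-buffer path,
 * combining the sealed memfd from the sketch near check_memfd_seals()
 * with UDMABUF_CREATE (error handling elided):
 *
 *	#include <linux/udmabuf.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *
 *	int devfd = open("/dev/udmabuf", O_RDWR);
 *	struct udmabuf_create create = {
 *		.memfd  = memfd,	// sealed memfd, F_SEAL_SHRINK set
 *		.flags  = UDMABUF_FLAGS_CLOEXEC,
 *		.offset = 0,
 *		.size   = buf_size,
 *	};
 *	int udmabuf_fd = ioctl(devfd, UDMABUF_CREATE, &create);
 */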

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}
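
/*
 * Example: the list variant reads a struct udmabuf_create_list header
 * followed directly by head.count items, so userspace allocates both
 * contiguously. A sketch stitching two hypothetical sealed memfds
 * ("memfd_a"/"memfd_b", with page-aligned sizes "size_a"/"size_b")
 * into a single dma-buf ("devfd" as in the sketch above):
 *
 *	struct udmabuf_create_list *head;
 *
 *	head = malloc(sizeof(*head) + 2 * sizeof(head->list[0]));
 *	head->flags = UDMABUF_FLAGS_CLOEXEC;
 *	head->count = 2;
 *	head->list[0] = (struct udmabuf_create_item)
 *			{ .memfd = memfd_a, .offset = 0, .size = size_a };
 *	head->list[1] = (struct udmabuf_create_item)
 *			{ .memfd = memfd_b, .offset = 0, .size = size_b };
 *	int udmabuf_fd = ioctl(devfd, UDMABUF_CREATE_LIST, head);
 */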

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
	.minor          = MISC_DYNAMIC_MINOR,
	.name           = "udmabuf",
	.fops           = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");