xref: /linux/drivers/dma-buf/dma-buf.c (revision f88cb2660bd09fd76b54e6bd2e62f3d7501147b6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Framework for buffer objects that can be shared across devices/subsystems.
4  *
5  * Copyright(C) 2011 Linaro Limited. All rights reserved.
6  * Author: Sumit Semwal <sumit.semwal@ti.com>
7  *
8  * Many thanks to linaro-mm-sig list, and specially
9  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11  * refining of this idea.
12  */
13 
14 #include <linux/fs.h>
15 #include <linux/slab.h>
16 #include <linux/dma-buf.h>
17 #include <linux/dma-fence.h>
18 #include <linux/dma-fence-unwrap.h>
19 #include <linux/anon_inodes.h>
20 #include <linux/export.h>
21 #include <linux/debugfs.h>
22 #include <linux/list.h>
23 #include <linux/module.h>
24 #include <linux/mutex.h>
25 #include <linux/seq_file.h>
26 #include <linux/sync_file.h>
27 #include <linux/poll.h>
28 #include <linux/dma-resv.h>
29 #include <linux/mm.h>
30 #include <linux/mount.h>
31 #include <linux/pseudo_fs.h>
32 
33 #include <uapi/linux/dma-buf.h>
34 #include <uapi/linux/magic.h>
35 
36 #include "dma-buf-sysfs-stats.h"
37 
/* Wrapper to hide the sg_table page link from the importer */
struct dma_buf_sg_table_wrapper {
	struct sg_table *original;	/* exporter's table, with real page links */
	struct sg_table wrapper;	/* copy handed to the importer; carries DMA addresses only */
};
43 
44 static inline int is_dma_buf_file(struct file *);
45 
46 static DEFINE_MUTEX(dmabuf_list_mutex);
47 static LIST_HEAD(dmabuf_list);
48 
/* Add @dmabuf to the global buffer list, taking the list mutex internally. */
static void __dma_buf_list_add(struct dma_buf *dmabuf)
{
	mutex_lock(&dmabuf_list_mutex);
	list_add(&dmabuf->list_node, &dmabuf_list);
	mutex_unlock(&dmabuf_list_mutex);
}
55 
/*
 * Remove @dmabuf from the global buffer list. Tolerates a NULL @dmabuf so
 * callers on teardown paths don't need their own check.
 */
static void __dma_buf_list_del(struct dma_buf *dmabuf)
{
	if (!dmabuf)
		return;

	mutex_lock(&dmabuf_list_mutex);
	list_del(&dmabuf->list_node);
	mutex_unlock(&dmabuf_list_mutex);
}
65 
66 /**
67  * dma_buf_iter_begin - begin iteration through global list of all DMA buffers
68  *
69  * Returns the first buffer in the global list of DMA-bufs that's not in the
70  * process of being destroyed. Increments that buffer's reference count to
71  * prevent buffer destruction. Callers must release the reference, either by
72  * continuing iteration with dma_buf_iter_next(), or with dma_buf_put().
73  *
74  * Return:
75  * * First buffer from global list, with refcount elevated
76  * * NULL if no active buffers are present
77  */
78 struct dma_buf *dma_buf_iter_begin(void)
79 {
80 	struct dma_buf *ret = NULL, *dmabuf;
81 
82 	/*
83 	 * The list mutex does not protect a dmabuf's refcount, so it can be
84 	 * zeroed while we are iterating. We cannot call get_dma_buf() since the
85 	 * caller may not already own a reference to the buffer.
86 	 */
87 	mutex_lock(&dmabuf_list_mutex);
88 	list_for_each_entry(dmabuf, &dmabuf_list, list_node) {
89 		if (file_ref_get(&dmabuf->file->f_ref)) {
90 			ret = dmabuf;
91 			break;
92 		}
93 	}
94 	mutex_unlock(&dmabuf_list_mutex);
95 	return ret;
96 }
97 
98 /**
99  * dma_buf_iter_next - continue iteration through global list of all DMA buffers
100  * @dmabuf:	[in]	pointer to dma_buf
101  *
102  * Decrements the reference count on the provided buffer. Returns the next
103  * buffer from the remainder of the global list of DMA-bufs with its reference
104  * count incremented. Callers must release the reference, either by continuing
105  * iteration with dma_buf_iter_next(), or with dma_buf_put().
106  *
107  * Return:
108  * * Next buffer from global list, with refcount elevated
109  * * NULL if no additional active buffers are present
110  */
struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
{
	struct dma_buf *ret = NULL;

	/*
	 * The list mutex does not protect a dmabuf's refcount, so it can be
	 * zeroed while we are iterating. We cannot call get_dma_buf() since the
	 * caller may not already own a reference to the buffer.
	 */
	mutex_lock(&dmabuf_list_mutex);
	/*
	 * Dropping the caller's reference while holding the mutex keeps
	 * @dmabuf usable as an iteration cursor: list removal
	 * (__dma_buf_list_del()) also takes dmabuf_list_mutex, so the node
	 * cannot be unlinked underneath us.
	 */
	dma_buf_put(dmabuf);
	list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) {
		/* Skip buffers whose file refcount already hit zero. */
		if (file_ref_get(&dmabuf->file->f_ref)) {
			ret = dmabuf;
			break;
		}
	}
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}
131 
132 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
133 {
134 	struct dma_buf *dmabuf;
135 	char name[DMA_BUF_NAME_LEN];
136 	ssize_t ret = 0;
137 
138 	dmabuf = dentry->d_fsdata;
139 	spin_lock(&dmabuf->name_lock);
140 	if (dmabuf->name)
141 		ret = strscpy(name, dmabuf->name, sizeof(name));
142 	spin_unlock(&dmabuf->name_lock);
143 
144 	return dynamic_dname(buffer, buflen, "/%s:%s",
145 			     dentry->d_name.name, ret > 0 ? name : "");
146 }
147 
/*
 * Final teardown of a dma_buf, run from the dentry d_release hook once the
 * last file reference is gone.
 */
static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	/* All importer vmaps must have been torn down by now. */
	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
	 */
	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

	dma_buf_stats_teardown(dmabuf);
	/* Let the exporter release its backing storage. */
	dmabuf->ops->release(dmabuf);

	/* Only finalize the reservation object if it is the embedded one. */
	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}
176 
177 static int dma_buf_file_release(struct inode *inode, struct file *file)
178 {
179 	if (!is_dma_buf_file(file))
180 		return -EINVAL;
181 
182 	__dma_buf_list_del(file->private_data);
183 
184 	return 0;
185 }
186 
/* Dentry hooks: pretty-printed name plus final buffer teardown. */
static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};
191 
192 static struct vfsmount *dma_buf_mnt;
193 
194 static int dma_buf_fs_init_context(struct fs_context *fc)
195 {
196 	struct pseudo_fs_context *ctx;
197 
198 	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
199 	if (!ctx)
200 		return -ENOMEM;
201 	ctx->dops = &dma_buf_dentry_ops;
202 	return 0;
203 }
204 
/* Internal pseudo-fs providing the inodes/dentries for dma-buf files. */
static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};
210 
211 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
212 {
213 	struct dma_buf *dmabuf;
214 
215 	if (!is_dma_buf_file(file))
216 		return -EINVAL;
217 
218 	dmabuf = file->private_data;
219 
220 	/* check if buffer supports mmap */
221 	if (!dmabuf->ops->mmap)
222 		return -EINVAL;
223 
224 	/* check for overflowing the buffer's size */
225 	if (vma->vm_pgoff + vma_pages(vma) >
226 	    dmabuf->size >> PAGE_SHIFT)
227 		return -EINVAL;
228 
229 	return dmabuf->ops->mmap(dmabuf, vma);
230 }
231 
232 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
233 {
234 	struct dma_buf *dmabuf;
235 	loff_t base;
236 
237 	if (!is_dma_buf_file(file))
238 		return -EBADF;
239 
240 	dmabuf = file->private_data;
241 
242 	/* only support discovering the end of the buffer,
243 	 * but also allow SEEK_SET to maintain the idiomatic
244 	 * SEEK_END(0), SEEK_CUR(0) pattern.
245 	 */
246 	if (whence == SEEK_END)
247 		base = dmabuf->size;
248 	else if (whence == SEEK_SET)
249 		base = 0;
250 	else
251 		return -EINVAL;
252 
253 	if (offset != 0)
254 		return -EINVAL;
255 
256 	return base + offset;
257 }
258 
259 /**
260  * DOC: implicit fence polling
261  *
262  * To support cross-device and cross-driver synchronization of buffer access
263  * implicit fences (represented internally in the kernel with &struct dma_fence)
264  * can be attached to a &dma_buf. The glue for that and a few related things are
265  * provided in the &dma_resv structure.
266  *
267  * Userspace can query the state of these implicitly tracked fences using poll()
268  * and related system calls:
269  *
270  * - Checking for EPOLLIN, i.e. read access, can be use to query the state of the
271  *   most recent write or exclusive fence.
272  *
273  * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
274  *   all attached fences, shared and exclusive ones.
275  *
276  * Note that this only signals the completion of the respective fences, i.e. the
277  * DMA transfers are complete. Cache flushing and any other necessary
278  * preparations before CPU access can begin still need to happen.
279  *
280  * As an alternative to poll(), the set of fences on DMA buffer can be
281  * exported as a &sync_file using &dma_buf_sync_file_export.
282  */
283 
/*
 * Fence completion callback for poll() support. Uses irqsave locking since
 * fence callbacks can fire from interrupt context. @fence may be NULL when
 * called directly from dma_buf_poll() (no callback was queued).
 */
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	/* Wake poll() waiters and mark this callback slot free again. */
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
	/* Drop the reference taken in dma_buf_poll_add_cb(). */
	dma_fence_put(fence);
	/* Paired with get_file in dma_buf_poll */
	fput(dmabuf->file);
}
298 
/*
 * Try to install dma_buf_poll_cb() on the first fence in @resv (read/write
 * set selected by @write) that hasn't signaled yet.
 *
 * Returns true if a callback was queued (dma_buf_poll_cb() will run later
 * and consume the fence + file references), false if every relevant fence
 * has already signaled.
 */
static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		/* Reference handed over to dma_buf_poll_cb() on success. */
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)
			return true;
		/* Couldn't attach (fence already signaled); try the next one. */
		dma_fence_put(fence);
	}

	return false;
}
317 
/*
 * poll() support: per the "implicit fence polling" DOC above, EPOLLIN
 * tracks the most recent write/exclusive fence and EPOLLOUT tracks all
 * attached fences.
 */
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	__poll_t events;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);

	if (events & EPOLLOUT) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				/* Callback queued: fences pending, not writable yet. */
				events &= ~EPOLLOUT;
		}
	}

	if (events & EPOLLIN) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				/* Callback queued: fences pending, not readable yet. */
				events &= ~EPOLLIN;
		}
	}

	dma_resv_unlock(resv);
	return events;
}
387 
388 /**
389  * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
390  * It could support changing the name of the dma-buf if the same
391  * piece of memory is used for multiple purpose between different devices.
392  *
393  * @dmabuf: [in]     dmabuf buffer that will be renamed.
394  * @buf:    [in]     A piece of userspace memory that contains the name of
395  *                   the dma-buf.
396  *
 * Returns 0 on success, or a negative error code if the new name cannot
 * be copied from userspace.
399  *
400  */
401 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
402 {
403 	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
404 
405 	if (IS_ERR(name))
406 		return PTR_ERR(name);
407 
408 	spin_lock(&dmabuf->name_lock);
409 	kfree(dmabuf->name);
410 	dmabuf->name = name;
411 	spin_unlock(&dmabuf->name_lock);
412 
413 	return 0;
414 }
415 
416 #if IS_ENABLED(CONFIG_SYNC_FILE)
/*
 * DMA_BUF_IOCTL_EXPORT_SYNC_FILE: collapse the buffer's current fences
 * (read, write or both, per arg.flags) into a single fence and hand it to
 * userspace as a new sync_file fd.
 */
static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
				     void __user *user_data)
{
	struct dma_buf_export_sync_file arg;
	enum dma_resv_usage usage;
	struct dma_fence *fence = NULL;
	struct sync_file *sync_file;
	int fd, ret;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	/* Only the READ/WRITE flags are valid, and at least one is required. */
	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	/* Reserve the fd first so the later copy_to_user can't race an install. */
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
	if (ret)
		goto err_put_fd;

	/* No fences attached: export an already-signaled stub fence. */
	if (!fence)
		fence = dma_fence_get_stub();

	sync_file = sync_file_create(fence);

	/* sync_file holds its own reference (or creation failed); drop ours. */
	dma_fence_put(fence);

	if (!sync_file) {
		ret = -ENOMEM;
		goto err_put_fd;
	}

	arg.fd = fd;
	if (copy_to_user(user_data, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto err_put_file;
	}

	/* Point of no return: the fd is now visible to userspace. */
	fd_install(fd, sync_file->file);

	return 0;

err_put_file:
	fput(sync_file->file);
err_put_fd:
	put_unused_fd(fd);
	return ret;
}
472 
/*
 * DMA_BUF_IOCTL_IMPORT_SYNC_FILE: attach the fences of a userspace-supplied
 * sync_file fd to this buffer's reservation object, as read or write
 * fences according to arg.flags.
 */
static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
				     const void __user *user_data)
{
	struct dma_buf_import_sync_file arg;
	struct dma_fence *fence, *f;
	enum dma_resv_usage usage;
	struct dma_fence_unwrap iter;
	unsigned int num_fences;
	int ret = 0;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	/* Only the READ/WRITE flags are valid, and at least one is required. */
	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fence = sync_file_get_fence(arg.fd);
	if (!fence)
		return -EINVAL;

	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
						   DMA_RESV_USAGE_READ;

	/* First pass: count the individual fences hidden in fence containers. */
	num_fences = 0;
	dma_fence_unwrap_for_each(f, &iter, fence)
		++num_fences;

	if (num_fences > 0) {
		dma_resv_lock(dmabuf->resv, NULL);

		/* Reserve all slots up front; dma_resv_add_fence() cannot fail. */
		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
		if (!ret) {
			dma_fence_unwrap_for_each(f, &iter, fence)
				dma_resv_add_fence(dmabuf->resv, f, usage);
		}

		dma_resv_unlock(dmabuf->resv);
	}

	/* Drop the reference obtained from sync_file_get_fence(). */
	dma_fence_put(fence);

	return ret;
}
519 #endif
520 
/* ioctl dispatcher for dma-buf fds; commands defined in uapi/linux/dma-buf.h. */
static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		/* Map the uapi READ/WRITE flags onto a DMA direction. */
		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		/* SYNC_END closes a CPU access window; otherwise open one. */
		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

#if IS_ENABLED(CONFIG_SYNC_FILE)
	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
#endif

	default:
		return -ENOTTY;
	}
}
575 
/* Emit per-fd details for /proc/<pid>/fdinfo of a dma-buf fd. */
static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	/* The user-supplied name is optional and protected by name_lock. */
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}
589 
/* File operations installed on every dma-buf file by dma_buf_getfile(). */
static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};
599 
/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 *
 * Identified purely by the file_operations pointer installed in
 * dma_buf_getfile().
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}
607 
/*
 * Allocate the anon inode and struct file backing a new dma-buf.
 * @flags is filtered down to the access mode and O_NONBLOCK.
 */
static struct file *dma_buf_getfile(size_t size, int flags)
{
	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
	struct file *file;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	/* Make the inode size reflect the buffer size (visible via stat). */
	inode->i_size = size;
	inode_set_bytes(inode, size);

	/*
	 * The ->i_ino acquired from get_next_ino() is not unique thus
	 * not suitable for using it as dentry name by dmabuf stats.
	 * Override ->i_ino with the unique and dmabuffs specific
	 * value.
	 */
	inode->i_ino = atomic64_inc_return(&dmabuf_inode);
	flags &= O_ACCMODE | O_NONBLOCK;
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;

	return file;

err_alloc_file:
	/* Drop the inode reference; file is an ERR_PTR at this point. */
	iput(inode);
	return file;
}
639 
640 /**
641  * DOC: dma buf device access
642  *
643  * For device DMA access to a shared DMA buffer the usual sequence of operations
644  * is fairly simple:
645  *
646  * 1. The exporter defines his exporter instance using
647  *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
648  *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
649  *    as a file descriptor by calling dma_buf_fd().
650  *
651  * 2. Userspace passes this file-descriptors to all drivers it wants this buffer
652  *    to share with: First the file descriptor is converted to a &dma_buf using
653  *    dma_buf_get(). Then the buffer is attached to the device using
654  *    dma_buf_attach().
655  *
656  *    Up to this stage the exporter is still free to migrate or reallocate the
657  *    backing storage.
658  *
659  * 3. Once the buffer is attached to all devices userspace can initiate DMA
660  *    access to the shared buffer. In the kernel this is done by calling
661  *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
662  *
663  * 4. Once a driver is done with a shared buffer it needs to call
664  *    dma_buf_detach() (after cleaning up any mappings) and then release the
665  *    reference acquired with dma_buf_get() by calling dma_buf_put().
666  *
667  * For the detailed semantics exporters are expected to implement see
668  * &dma_buf_ops.
669  */
670 
671 /**
672  * dma_buf_export - Creates a new dma_buf, and associates an anon file
673  * with this buffer, so it can be exported.
674  * Also connect the allocator specific data and ops to the buffer.
675  * Additionally, provide a name string for exporter; useful in debugging.
676  *
677  * @exp_info:	[in]	holds all the export related information provided
678  *			by the exporter. see &struct dma_buf_export_info
679  *			for further details.
680  *
681  * Returns, on success, a newly created struct dma_buf object, which wraps the
682  * supplied private data and operations for struct dma_buf_ops. On either
683  * missing ops, or error in allocating struct dma_buf, will return negative
684  * error.
685  *
686  * For most cases the easiest way to create @exp_info is through the
687  * %DEFINE_DMA_BUF_EXPORT_INFO macro.
688  */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	/* map/unmap/release are the mandatory exporter callbacks. */
	if (WARN_ON(!exp_info->priv || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release))
		return ERR_PTR(-EINVAL);

	/* pin and unpin must be provided together or not at all. */
	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	/* Keep the exporting module alive for the buffer's lifetime. */
	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	file = dma_buf_getfile(exp_info->size, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_module;
	}

	/*
	 * If the exporter didn't supply a reservation object, embed one in
	 * the same allocation right after the dma_buf itself.
	 */
	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;
	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_file;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
	INIT_LIST_HEAD(&dmabuf->attachments);

	if (!resv) {
		/* Embedded case: dma_buf_release() detects this by address. */
		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(dmabuf->resv);
	} else {
		dmabuf->resv = resv;
	}

	ret = dma_buf_stats_setup(dmabuf, file);
	if (ret)
		goto err_dmabuf;

	/* Cross-link file and buffer; d_fsdata feeds dmabuffs_dname(). */
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;
	dmabuf->file = file;

	__dma_buf_list_add(dmabuf);

	return dmabuf;

err_dmabuf:
	if (!resv)
		dma_resv_fini(dmabuf->resv);
	kfree(dmabuf);
err_file:
	fput(file);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");
767 
768 /**
769  * dma_buf_fd - returns a file descriptor for the given struct dma_buf
770  * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
771  * @flags:      [in]    flags to give to fd
772  *
773  * On success, returns an associated 'fd'. Else, returns error.
774  */
775 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
776 {
777 	int fd;
778 
779 	if (!dmabuf || !dmabuf->file)
780 		return -EINVAL;
781 
782 	fd = get_unused_fd_flags(flags);
783 	if (fd < 0)
784 		return fd;
785 
786 	fd_install(fd, dmabuf->file);
787 
788 	return fd;
789 }
790 EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");
791 
792 /**
793  * dma_buf_get - returns the struct dma_buf related to an fd
794  * @fd:	[in]	fd associated with the struct dma_buf to be returned
795  *
796  * On success, returns the struct dma_buf associated with an fd; uses
797  * file's refcounting done by fget to increase refcount. returns ERR_PTR
798  * otherwise.
799  */
800 struct dma_buf *dma_buf_get(int fd)
801 {
802 	struct file *file;
803 
804 	file = fget(fd);
805 
806 	if (!file)
807 		return ERR_PTR(-EBADF);
808 
809 	if (!is_dma_buf_file(file)) {
810 		fput(file);
811 		return ERR_PTR(-EINVAL);
812 	}
813 
814 	return file->private_data;
815 }
816 EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF");
817 
818 /**
819  * dma_buf_put - decreases refcount of the buffer
820  * @dmabuf:	[in]	buffer to reduce refcount of
821  *
822  * Uses file's refcounting done implicitly by fput().
823  *
824  * If, as a result of this call, the refcount becomes 0, the 'release' file
825  * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
826  * in turn, and frees the memory allocated for dmabuf when exported.
827  */
void dma_buf_put(struct dma_buf *dmabuf)
{
	/* Guard against API misuse; a valid dma-buf always has a file. */
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");
836 
/*
 * Debug aid (CONFIG_DMABUF_DEBUG): replace *sg_table with a copy that has
 * no struct page links, so importers that illegally touch the pages crash
 * instead of silently working. No-op when the config option is off.
 */
static int dma_buf_wrap_sg_table(struct sg_table **sg_table)
{
	struct scatterlist *to_sg, *from_sg;
	struct sg_table *from = *sg_table;
	struct dma_buf_sg_table_wrapper *to;
	int i, ret;

	if (!IS_ENABLED(CONFIG_DMABUF_DEBUG))
		return 0;

	/*
	 * To catch abuse of the underlying struct page by importers copy the
	 * sg_table without copying the page_link and give only the copy back to
	 * the importer.
	 */
	to = kzalloc(sizeof(*to), GFP_KERNEL);
	if (!to)
		return -ENOMEM;

	ret = sg_alloc_table(&to->wrapper, from->nents, GFP_KERNEL);
	if (ret)
		goto free_to;

	/* Copy only the DMA address/length of each entry; clear page info. */
	to_sg = to->wrapper.sgl;
	for_each_sgtable_dma_sg(from, from_sg, i) {
		to_sg->offset = 0;
		to_sg->length = 0;
		sg_assign_page(to_sg, NULL);
		sg_dma_address(to_sg) = sg_dma_address(from_sg);
		sg_dma_len(to_sg) = sg_dma_len(from_sg);
		to_sg = sg_next(to_sg);
	}

	/* Keep the original so dma_buf_unwrap_sg_table() can restore it. */
	to->original = from;
	*sg_table = &to->wrapper;
	return 0;

free_to:
	kfree(to);
	return ret;
}
878 
879 static void dma_buf_unwrap_sg_table(struct sg_table **sg_table)
880 {
881 	struct dma_buf_sg_table_wrapper *copy;
882 
883 	if (!IS_ENABLED(CONFIG_DMABUF_DEBUG))
884 		return;
885 
886 	copy = container_of(*sg_table, typeof(*copy), wrapper);
887 	*sg_table = copy->original;
888 	sg_free_table(&copy->wrapper);
889 	kfree(copy);
890 }
891 
892 static inline bool
893 dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
894 {
895 	return !!attach->importer_ops;
896 }
897 
898 static bool
899 dma_buf_pin_on_map(struct dma_buf_attachment *attach)
900 {
901 	return attach->dmabuf->ops->pin &&
902 		(!dma_buf_attachment_is_dynamic(attach) ||
903 		 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
904 }
905 
906 /**
907  * DOC: locking convention
908  *
909  * In order to avoid deadlock situations between dma-buf exports and importers,
910  * all dma-buf API users must follow the common dma-buf locking convention.
911  *
912  * Convention for importers
913  *
914  * 1. Importers must hold the dma-buf reservation lock when calling these
915  *    functions:
916  *
917  *     - dma_buf_pin()
918  *     - dma_buf_unpin()
919  *     - dma_buf_map_attachment()
920  *     - dma_buf_unmap_attachment()
921  *     - dma_buf_vmap()
922  *     - dma_buf_vunmap()
923  *
924  * 2. Importers must not hold the dma-buf reservation lock when calling these
925  *    functions:
926  *
927  *     - dma_buf_attach()
928  *     - dma_buf_dynamic_attach()
929  *     - dma_buf_detach()
930  *     - dma_buf_export()
931  *     - dma_buf_fd()
932  *     - dma_buf_get()
933  *     - dma_buf_put()
934  *     - dma_buf_mmap()
935  *     - dma_buf_begin_cpu_access()
936  *     - dma_buf_end_cpu_access()
937  *     - dma_buf_map_attachment_unlocked()
938  *     - dma_buf_unmap_attachment_unlocked()
939  *     - dma_buf_vmap_unlocked()
940  *     - dma_buf_vunmap_unlocked()
941  *
942  * Convention for exporters
943  *
944  * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
945  *    reservation and exporter can take the lock:
946  *
947  *     - &dma_buf_ops.attach()
948  *     - &dma_buf_ops.detach()
949  *     - &dma_buf_ops.release()
950  *     - &dma_buf_ops.begin_cpu_access()
951  *     - &dma_buf_ops.end_cpu_access()
952  *     - &dma_buf_ops.mmap()
953  *
954  * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
955  *    reservation and exporter can't take the lock:
956  *
957  *     - &dma_buf_ops.pin()
958  *     - &dma_buf_ops.unpin()
959  *     - &dma_buf_ops.map_dma_buf()
960  *     - &dma_buf_ops.unmap_dma_buf()
961  *     - &dma_buf_ops.vmap()
962  *     - &dma_buf_ops.vunmap()
963  *
964  * 3. Exporters must hold the dma-buf reservation lock when calling these
965  *    functions:
966  *
967  *     - dma_buf_move_notify()
968  */
969 
970 /**
971  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
972  * @dmabuf:		[in]	buffer to attach device to.
973  * @dev:		[in]	device to be attached.
974  * @importer_ops:	[in]	importer operations for the attachment
975  * @importer_priv:	[in]	importer private pointer for the attachment
976  *
977  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
978  * must be cleaned up by calling dma_buf_detach().
979  *
980  * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
981  * functionality.
982  *
983  * Returns:
984  *
985  * A pointer to newly created &dma_buf_attachment on success, or a negative
986  * error code wrapped into a pointer on failure.
987  *
988  * Note that this can fail if the backing storage of @dmabuf is in a place not
989  * accessible to @dev, and cannot be moved to a more suitable place. This is
990  * indicated with the error code -EBUSY.
991  */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	/* Dynamic importers must be able to handle move notifications. */
	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	/* Give the exporter a chance to reject or set up the attachment. */
	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	/* Publish on the buffer's attachment list under the resv lock. */
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
1033 
1034 /**
1035  * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
1036  * @dmabuf:	[in]	buffer to attach device to.
1037  * @dev:	[in]	device to be attached.
1038  *
1039  * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
1040  * mapping.
1041  */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	/* No importer ops: the attachment is static (no move_notify). */
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
1048 
1049 /**
1050  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
1051  * @dmabuf:	[in]	buffer to detach from.
1052  * @attach:	[in]	attachment to be detached; is free'd after this call.
1053  *
1054  * Clean up a device attachment obtained by calling dma_buf_attach().
1055  *
1056  * Optionally this calls &dma_buf_ops.detach for device-specific detach.
1057  */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	/* Reject mismatched or NULL arguments outright. */
	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
		return;

	/* Unpublish from the attachment list under the resv lock. */
	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);

	/* Optional exporter-specific cleanup. */
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF");
1073 
1074 /**
1075  * dma_buf_pin - Lock down the DMA-buf
1076  * @attach:	[in]	attachment which should be pinned
1077  *
1078  * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
1079  * call this, and only for limited use cases like scanout and not for temporary
1080  * pin operations. It is not permitted to allow userspace to pin arbitrary
1081  * amounts of buffers through this interface.
1082  *
1083  * Buffers must be unpinned by calling dma_buf_unpin().
1084  *
1085  * Returns:
1086  * 0 on success, negative error code on failure.
1087  */
1088 int dma_buf_pin(struct dma_buf_attachment *attach)
1089 {
1090 	struct dma_buf *dmabuf = attach->dmabuf;
1091 	int ret = 0;
1092 
1093 	WARN_ON(!attach->importer_ops);
1094 
1095 	dma_resv_assert_held(dmabuf->resv);
1096 
1097 	if (dmabuf->ops->pin)
1098 		ret = dmabuf->ops->pin(attach);
1099 
1100 	return ret;
1101 }
1102 EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");
1103 
1104 /**
1105  * dma_buf_unpin - Unpin a DMA-buf
1106  * @attach:	[in]	attachment which should be unpinned
1107  *
1108  * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
1109  * any mapping of @attach again and inform the importer through
1110  * &dma_buf_attach_ops.move_notify.
1111  */
1112 void dma_buf_unpin(struct dma_buf_attachment *attach)
1113 {
1114 	struct dma_buf *dmabuf = attach->dmabuf;
1115 
1116 	WARN_ON(!attach->importer_ops);
1117 
1118 	dma_resv_assert_held(dmabuf->resv);
1119 
1120 	if (dmabuf->ops->unpin)
1121 		dmabuf->ops->unpin(attach);
1122 }
1123 EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");
1124 
1125 /**
1126  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
1127  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1128  * dma_buf_ops.
1129  * @attach:	[in]	attachment whose scatterlist is to be returned
1130  * @direction:	[in]	direction of DMA transfer
1131  *
1132  * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
1133  * on error. May return -EINTR if it is interrupted by a signal.
1134  *
1135  * On success, the DMA addresses and lengths in the returned scatterlist are
1136  * PAGE_SIZE aligned.
1137  *
1138  * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
1139  * the underlying backing storage is pinned for as long as a mapping exists,
1140  * therefore users/importers should not hold onto a mapping for undue amounts of
1141  * time.
1142  *
1143  * Important: Dynamic importers must wait for the exclusive fence of the struct
1144  * dma_resv attached to the DMA-BUF first.
1145  */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	signed long ret;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_assert_held(attach->dmabuf->resv);

	/* Pin the backing storage first when this attachment requires it. */
	if (dma_buf_pin_on_map(attach)) {
		ret = attach->dmabuf->ops->pin(attach);
		/*
		 * Catch exporters making buffers inaccessible even when
		 * attachments preventing that exist.
		 */
		WARN_ON_ONCE(ret == -EBUSY);
		if (ret)
			return ERR_PTR(ret);
	}

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	/* Normalize a NULL return from the exporter into an error pointer. */
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);
	if (IS_ERR(sg_table))
		goto error_unpin;

	/*
	 * Importers with static attachments don't wait for fences.
	 */
	if (!dma_buf_attachment_is_dynamic(attach)) {
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0)
			goto error_unmap;
	}
	/* Hide the sg_table page links from the importer (see wrapper above). */
	ret = dma_buf_wrap_sg_table(&sg_table);
	if (ret)
		goto error_unmap;

	/* Debug-only check of the documented PAGE_SIZE alignment guarantee. */
	if (IS_ENABLED(CONFIG_DMA_API_DEBUG)) {
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
				break;
			}
		}
	}
	return sg_table;

error_unmap:
	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
	sg_table = ERR_PTR(ret);

error_unpin:
	if (dma_buf_pin_on_map(attach))
		attach->dmabuf->ops->unpin(attach);

	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
1219 
1220 /**
1221  * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
1222  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1223  * dma_buf_ops.
1224  * @attach:	[in]	attachment whose scatterlist is to be returned
1225  * @direction:	[in]	direction of DMA transfer
1226  *
1227  * Unlocked variant of dma_buf_map_attachment().
1228  */
1229 struct sg_table *
1230 dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
1231 				enum dma_data_direction direction)
1232 {
1233 	struct sg_table *sg_table;
1234 
1235 	might_sleep();
1236 
1237 	if (WARN_ON(!attach || !attach->dmabuf))
1238 		return ERR_PTR(-EINVAL);
1239 
1240 	dma_resv_lock(attach->dmabuf->resv, NULL);
1241 	sg_table = dma_buf_map_attachment(attach, direction);
1242 	dma_resv_unlock(attach->dmabuf->resv);
1243 
1244 	return sg_table;
1245 }
1246 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");
1247 
1248 /**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1250  * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
1251  * dma_buf_ops.
1252  * @attach:	[in]	attachment to unmap buffer from
1253  * @sg_table:	[in]	scatterlist info of the buffer to unmap
1254  * @direction:  [in]    direction of DMA transfer
1255  *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1257  */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	dma_resv_assert_held(attach->dmabuf->resv);

	/* Restore the exporter's original sg_table before handing it back. */
	dma_buf_unwrap_sg_table(&sg_table);
	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);

	/* Drop the pin taken in dma_buf_map_attachment(), if any. */
	if (dma_buf_pin_on_map(attach))
		attach->dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
1276 
1277 /**
 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
1279  * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
1280  * dma_buf_ops.
1281  * @attach:	[in]	attachment to unmap buffer from
1282  * @sg_table:	[in]	scatterlist info of the buffer to unmap
1283  * @direction:	[in]	direction of DMA transfer
1284  *
1285  * Unlocked variant of dma_buf_unmap_attachment().
1286  */
1287 void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
1288 				       struct sg_table *sg_table,
1289 				       enum dma_data_direction direction)
1290 {
1291 	might_sleep();
1292 
1293 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1294 		return;
1295 
1296 	dma_resv_lock(attach->dmabuf->resv, NULL);
1297 	dma_buf_unmap_attachment(attach, sg_table, direction);
1298 	dma_resv_unlock(attach->dmabuf->resv);
1299 }
1300 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");
1301 
1302 /**
1303  * dma_buf_move_notify - notify attachments that DMA-buf is moving
1304  *
1305  * @dmabuf:	[in]	buffer which is moving
1306  *
1307  * Informs all attachments that they need to destroy and recreate all their
1308  * mappings.
1309  */
1310 void dma_buf_move_notify(struct dma_buf *dmabuf)
1311 {
1312 	struct dma_buf_attachment *attach;
1313 
1314 	dma_resv_assert_held(dmabuf->resv);
1315 
1316 	list_for_each_entry(attach, &dmabuf->attachments, node)
1317 		if (attach->importer_ops)
1318 			attach->importer_ops->move_notify(attach);
1319 }
1320 EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");
1321 
1322 /**
1323  * DOC: cpu access
1324  *
1325  * There are multiple reasons for supporting CPU access to a dma buffer object:
1326  *
1327  * - Fallback operations in the kernel, for example when a device is connected
1328  *   over USB and the kernel needs to shuffle the data around first before
1329  *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
1332  *
 *   Since most kernel internal dma-buf accesses need the entire buffer, a
1334  *   vmap interface is introduced. Note that on very old 32-bit architectures
1335  *   vmalloc space might be limited and result in vmap calls failing.
1336  *
1337  *   Interfaces:
1338  *
1339  *   .. code-block:: c
1340  *
1341  *     void *dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1342  *     void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1343  *
1344  *   The vmap call can fail if there is no vmap support in the exporter, or if
1345  *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1346  *   count for all vmap access and calls down into the exporter's vmap function
1347  *   only when no vmapping exists, and only unmaps it once. Protection against
1348  *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
1349  *
1350  * - For full compatibility on the importer side with existing userspace
1351  *   interfaces, which might already support mmap'ing buffers. This is needed in
1352  *   many processing pipelines (e.g. feeding a software rendered image into a
1353  *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1354  *   framework already supported this and for DMA buffer file descriptors to
1355  *   replace ION buffers mmap support was needed.
1356  *
 *   There are no special interfaces; userspace simply calls mmap on the dma-buf
1358  *   fd. But like for CPU access there's a need to bracket the actual access,
1359  *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1360  *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1361  *   be restarted.
1362  *
1363  *   Some systems might need some sort of cache coherency management e.g. when
1364  *   CPU and GPU domains are being accessed through dma-buf at the same time.
1365  *   To circumvent this problem there are begin/end coherency markers, that
1366  *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
1367  *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1368  *   sequence would be used like following:
1369  *
1370  *     - mmap dma-buf fd
1371  *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
1372  *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
1373  *       want (with the new data being consumed by say the GPU or the scanout
1374  *       device)
1375  *     - munmap once you don't need the buffer any more
1376  *
1377  *    For correctness and optimal performance, it is always required to use
1378  *    SYNC_START and SYNC_END before and after, respectively, when accessing the
1379  *    mapped address. Userspace cannot rely on coherent access, even when there
1380  *    are systems where it just works without calling these ioctls.
1381  *
1382  * - And as a CPU fallback in userspace processing pipelines.
1383  *
1384  *   Similar to the motivation for kernel cpu access it is again important that
1385  *   the userspace code of a given importing subsystem can use the same
1386  *   interfaces with a imported dma-buf buffer object as with a native buffer
1387  *   object. This is especially important for drm where the userspace part of
1388  *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
1390  *
1391  *   The assumption in the current dma-buf interfaces is that redirecting the
1392  *   initial mmap is all that's needed. A survey of some of the existing
1393  *   subsystems shows that no driver seems to do any nefarious thing like
1394  *   syncing up with outstanding asynchronous processing on the device or
1395  *   allocating special resources at fault time. So hopefully this is good
1396  *   enough, since adding interfaces to intercept pagefaults and allow pte
1397  *   shootdowns would increase the complexity quite a bit.
1398  *
1399  *   Interface:
1400  *
1401  *   .. code-block:: c
1402  *
1403  *     int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);
1404  *
1405  *   If the importing subsystem simply provides a special-purpose mmap call to
1406  *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1407  *   equally achieve that for a dma-buf object.
1408  */
1409 
1410 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1411 				      enum dma_data_direction direction)
1412 {
1413 	bool write = (direction == DMA_BIDIRECTIONAL ||
1414 		      direction == DMA_TO_DEVICE);
1415 	struct dma_resv *resv = dmabuf->resv;
1416 	long ret;
1417 
1418 	/* Wait on any implicit rendering fences */
1419 	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1420 				    true, MAX_SCHEDULE_TIMEOUT);
1421 	if (ret < 0)
1422 		return ret;
1423 
1424 	return 0;
1425 }
1426 
1427 /**
1428  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1429  * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1430  * preparations. Coherency is only guaranteed in the specified range for the
1431  * specified access direction.
1432  * @dmabuf:	[in]	buffer to prepare cpu access for.
1433  * @direction:	[in]	direction of access.
1434  *
1435  * After the cpu access is complete the caller should call
1436  * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1437  * it guaranteed to be coherent with other DMA access.
1438  *
1439  * This function will also wait for any DMA transactions tracked through
1440  * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1441  * synchronization this function will only ensure cache coherency, callers must
1442  * ensure synchronization with such DMA transactions on their own.
1443  *
1444  * Can return negative error values, returns 0 on success.
1445  */
1446 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1447 			     enum dma_data_direction direction)
1448 {
1449 	int ret = 0;
1450 
1451 	if (WARN_ON(!dmabuf))
1452 		return -EINVAL;
1453 
1454 	might_lock(&dmabuf->resv->lock.base);
1455 
1456 	if (dmabuf->ops->begin_cpu_access)
1457 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1458 
1459 	/* Ensure that all fences are waited upon - but we first allow
1460 	 * the native handler the chance to do so more efficiently if it
1461 	 * chooses. A double invocation here will be reasonably cheap no-op.
1462 	 */
1463 	if (ret == 0)
1464 		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1465 
1466 	return ret;
1467 }
1468 EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF");
1469 
1470 /**
1471  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1472  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1473  * actions. Coherency is only guaranteed in the specified range for the
1474  * specified access direction.
1475  * @dmabuf:	[in]	buffer to complete cpu access for.
1476  * @direction:	[in]	direction of access.
1477  *
1478  * This terminates CPU access started with dma_buf_begin_cpu_access().
1479  *
1480  * Can return negative error values, returns 0 on success.
1481  */
1482 int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1483 			   enum dma_data_direction direction)
1484 {
1485 	int ret = 0;
1486 
1487 	WARN_ON(!dmabuf);
1488 
1489 	might_lock(&dmabuf->resv->lock.base);
1490 
1491 	if (dmabuf->ops->end_cpu_access)
1492 		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1493 
1494 	return ret;
1495 }
1496 EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF");
1497 
1498 
1499 /**
1500  * dma_buf_mmap - Setup up a userspace mmap with the given vma
1501  * @dmabuf:	[in]	buffer that should back the vma
1502  * @vma:	[in]	vma for the mmap
1503  * @pgoff:	[in]	offset in pages where this mmap should start within the
1504  *			dma-buf buffer.
1505  *
1506  * This function adjusts the passed in vma so that it points at the file of the
1507  * dma_buf operation. It also adjusts the starting pgoff and does bounds
1508  * checking on the size of the vma. Then it calls the exporters mmap function to
1509  * set up the mapping.
1510  *
1511  * Can return negative error values, returns 0 on success.
1512  */
1513 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1514 		 unsigned long pgoff)
1515 {
1516 	if (WARN_ON(!dmabuf || !vma))
1517 		return -EINVAL;
1518 
1519 	/* check if buffer supports mmap */
1520 	if (!dmabuf->ops->mmap)
1521 		return -EINVAL;
1522 
1523 	/* check for offset overflow */
1524 	if (pgoff + vma_pages(vma) < pgoff)
1525 		return -EOVERFLOW;
1526 
1527 	/* check for overflowing the buffer's size */
1528 	if (pgoff + vma_pages(vma) >
1529 	    dmabuf->size >> PAGE_SHIFT)
1530 		return -EINVAL;
1531 
1532 	/* readjust the vma */
1533 	vma_set_file(vma, dmabuf->file);
1534 	vma->vm_pgoff = pgoff;
1535 
1536 	return dmabuf->ops->mmap(dmabuf, vma);
1537 }
1538 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF");
1539 
1540 /**
1541  * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1542  * address space. Same restrictions as for vmap and friends apply.
1543  * @dmabuf:	[in]	buffer to vmap
1544  * @map:	[out]	returns the vmap pointer
1545  *
1546  * This call may fail due to lack of virtual mapping address space.
1547  * These calls are optional in drivers. The intended use for them
1548  * is for mapping objects linear in kernel space for high use objects.
1549  *
1550  * To ensure coherency users must call dma_buf_begin_cpu_access() and
1551  * dma_buf_end_cpu_access() around any cpu access performed through this
1552  * mapping.
1553  *
1554  * Returns 0 on success, or a negative errno code otherwise.
1555  */
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct iosys_map ptr;
	int ret;

	/* Start from a cleared map so callers never see stale pointers. */
	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	dma_resv_assert_held(dmabuf->resv);

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	/* Reuse an existing mapping; only refcount it. */
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		return 0;
	}

	/* Counter is zero, so no cached mapping may exist. */
	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		return ret;

	/* Cache the new mapping for subsequent dma_buf_vmap() calls. */
	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF");
1592 
1593 /**
1594  * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
1595  * address space. Same restrictions as for vmap and friends apply.
1596  * @dmabuf:	[in]	buffer to vmap
1597  * @map:	[out]	returns the vmap pointer
1598  *
1599  * Unlocked version of dma_buf_vmap()
1600  *
1601  * Returns 0 on success, or a negative errno code otherwise.
1602  */
1603 int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1604 {
1605 	int ret;
1606 
1607 	iosys_map_clear(map);
1608 
1609 	if (WARN_ON(!dmabuf))
1610 		return -EINVAL;
1611 
1612 	dma_resv_lock(dmabuf->resv, NULL);
1613 	ret = dma_buf_vmap(dmabuf, map);
1614 	dma_resv_unlock(dmabuf->resv);
1615 
1616 	return ret;
1617 }
1618 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF");
1619 
1620 /**
1621  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1622  * @dmabuf:	[in]	buffer to vunmap
1623  * @map:	[in]	vmap pointer to vunmap
1624  */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	dma_resv_assert_held(dmabuf->resv);

	/* A vunmap without a matching vmap is a caller bug. */
	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));

	/* Tear the mapping down only when the last user drops it. */
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		iosys_map_clear(&dmabuf->vmap_ptr);
	}
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF");
1643 
1644 /**
1645  * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
1646  * @dmabuf:	[in]	buffer to vunmap
1647  * @map:	[in]	vmap pointer to vunmap
1648  */
1649 void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1650 {
1651 	if (WARN_ON(!dmabuf))
1652 		return;
1653 
1654 	dma_resv_lock(dmabuf->resv, NULL);
1655 	dma_buf_vunmap(dmabuf, map);
1656 	dma_resv_unlock(dmabuf->resv);
1657 }
1658 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF");
1659 
1660 #ifdef CONFIG_DEBUG_FS
/* debugfs "bufinfo" dump: one entry per dma-buf on the global list. */
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;
	int ret;

	/* Interruptible: this is purely diagnostic, a signal may abort it. */
	ret = mutex_lock_interruptible(&dmabuf_list_mutex);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &dmabuf_list, list_node) {

		/* Per-buffer resv lock protects the attachment list below. */
		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;


		/* name_lock guards buf_obj->name against concurrent updates. */
		spin_lock(&buf_obj->name_lock);
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name,
				file_inode(buf_obj->file)->i_ino,
				buf_obj->name ?: "<none>");
		spin_unlock(&buf_obj->name_lock);

		dma_resv_describe(buf_obj->resv, s);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
				attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&dmabuf_list_mutex);
	return 0;

error_unlock:
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1724 
1725 static struct dentry *dma_buf_debugfs_dir;
1726 
1727 static int dma_buf_init_debugfs(void)
1728 {
1729 	struct dentry *d;
1730 	int err = 0;
1731 
1732 	d = debugfs_create_dir("dma_buf", NULL);
1733 	if (IS_ERR(d))
1734 		return PTR_ERR(d);
1735 
1736 	dma_buf_debugfs_dir = d;
1737 
1738 	d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir,
1739 				NULL, &dma_buf_debug_fops);
1740 	if (IS_ERR(d)) {
1741 		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1742 		debugfs_remove_recursive(dma_buf_debugfs_dir);
1743 		dma_buf_debugfs_dir = NULL;
1744 		err = PTR_ERR(d);
1745 	}
1746 
1747 	return err;
1748 }
1749 
/* Remove the dma_buf debugfs directory and everything beneath it. */
static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
1754 #else
/* CONFIG_DEBUG_FS disabled: debugfs setup and teardown are no-ops. */
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
1762 #endif
1763 
1764 static int __init dma_buf_init(void)
1765 {
1766 	int ret;
1767 
1768 	ret = dma_buf_init_sysfs_statistics();
1769 	if (ret)
1770 		return ret;
1771 
1772 	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1773 	if (IS_ERR(dma_buf_mnt))
1774 		return PTR_ERR(dma_buf_mnt);
1775 
1776 	dma_buf_init_debugfs();
1777 	return 0;
1778 }
1779 subsys_initcall(dma_buf_init);
1780 
/* Module exit: tear down in the reverse order of dma_buf_init(). */
static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
	dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);
1788