xref: /linux/drivers/dma-buf/dma-buf.c (revision db5d28c0bfe566908719bec8e25443aabecbb802)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Framework for buffer objects that can be shared across devices/subsystems.
4  *
5  * Copyright(C) 2011 Linaro Limited. All rights reserved.
6  * Author: Sumit Semwal <sumit.semwal@ti.com>
7  *
8  * Many thanks to linaro-mm-sig list, and specially
9  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11  * refining of this idea.
12  */
13 
14 #include <linux/fs.h>
15 #include <linux/slab.h>
16 #include <linux/dma-buf.h>
17 #include <linux/dma-fence.h>
18 #include <linux/dma-fence-unwrap.h>
19 #include <linux/anon_inodes.h>
20 #include <linux/export.h>
21 #include <linux/debugfs.h>
22 #include <linux/module.h>
23 #include <linux/seq_file.h>
24 #include <linux/sync_file.h>
25 #include <linux/poll.h>
26 #include <linux/dma-resv.h>
27 #include <linux/mm.h>
28 #include <linux/mount.h>
29 #include <linux/pseudo_fs.h>
30 
31 #include <uapi/linux/dma-buf.h>
32 #include <uapi/linux/magic.h>
33 
34 #include "dma-buf-sysfs-stats.h"
35 
36 static inline int is_dma_buf_file(struct file *);
37 
38 #if IS_ENABLED(CONFIG_DEBUG_FS)
39 static DEFINE_MUTEX(debugfs_list_mutex);
40 static LIST_HEAD(debugfs_list);
41 
42 static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
43 {
44 	mutex_lock(&debugfs_list_mutex);
45 	list_add(&dmabuf->list_node, &debugfs_list);
46 	mutex_unlock(&debugfs_list_mutex);
47 }
48 
49 static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
50 {
51 	if (!dmabuf)
52 		return;
53 
54 	mutex_lock(&debugfs_list_mutex);
55 	list_del(&dmabuf->list_node);
56 	mutex_unlock(&debugfs_list_mutex);
57 }
58 #else
59 static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
60 {
61 }
62 
63 static void __dma_buf_debugfs_list_del(struct file *file)
64 {
65 }
66 #endif
67 
68 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
69 {
70 	struct dma_buf *dmabuf;
71 	char name[DMA_BUF_NAME_LEN];
72 	ssize_t ret = 0;
73 
74 	dmabuf = dentry->d_fsdata;
75 	spin_lock(&dmabuf->name_lock);
76 	if (dmabuf->name)
77 		ret = strscpy(name, dmabuf->name, sizeof(name));
78 	spin_unlock(&dmabuf->name_lock);
79 
80 	return dynamic_dname(buffer, buflen, "/%s:%s",
81 			     dentry->d_name.name, ret > 0 ? name : "");
82 }
83 
84 static void dma_buf_release(struct dentry *dentry)
85 {
86 	struct dma_buf *dmabuf;
87 
88 	dmabuf = dentry->d_fsdata;
89 	if (unlikely(!dmabuf))
90 		return;
91 
92 	BUG_ON(dmabuf->vmapping_counter);
93 
94 	/*
95 	 * If you hit this BUG() it could mean:
96 	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
97 	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
98 	 */
99 	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
100 
101 	dma_buf_stats_teardown(dmabuf);
102 	dmabuf->ops->release(dmabuf);
103 
104 	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
105 		dma_resv_fini(dmabuf->resv);
106 
107 	WARN_ON(!list_empty(&dmabuf->attachments));
108 	module_put(dmabuf->owner);
109 	kfree(dmabuf->name);
110 	kfree(dmabuf);
111 }
112 
113 static int dma_buf_file_release(struct inode *inode, struct file *file)
114 {
115 	if (!is_dma_buf_file(file))
116 		return -EINVAL;
117 
118 	__dma_buf_debugfs_list_del(file->private_data);
119 
120 	return 0;
121 }
122 
123 static const struct dentry_operations dma_buf_dentry_ops = {
124 	.d_dname = dmabuffs_dname,
125 	.d_release = dma_buf_release,
126 };
127 
128 static struct vfsmount *dma_buf_mnt;
129 
130 static int dma_buf_fs_init_context(struct fs_context *fc)
131 {
132 	struct pseudo_fs_context *ctx;
133 
134 	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
135 	if (!ctx)
136 		return -ENOMEM;
137 	ctx->dops = &dma_buf_dentry_ops;
138 	return 0;
139 }
140 
141 static struct file_system_type dma_buf_fs_type = {
142 	.name = "dmabuf",
143 	.init_fs_context = dma_buf_fs_init_context,
144 	.kill_sb = kill_anon_super,
145 };
146 
147 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
148 {
149 	struct dma_buf *dmabuf;
150 
151 	if (!is_dma_buf_file(file))
152 		return -EINVAL;
153 
154 	dmabuf = file->private_data;
155 
156 	/* check if buffer supports mmap */
157 	if (!dmabuf->ops->mmap)
158 		return -EINVAL;
159 
160 	/* check for overflowing the buffer's size */
161 	if (vma->vm_pgoff + vma_pages(vma) >
162 	    dmabuf->size >> PAGE_SHIFT)
163 		return -EINVAL;
164 
165 	return dmabuf->ops->mmap(dmabuf, vma);
166 }
167 
168 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
169 {
170 	struct dma_buf *dmabuf;
171 	loff_t base;
172 
173 	if (!is_dma_buf_file(file))
174 		return -EBADF;
175 
176 	dmabuf = file->private_data;
177 
178 	/* only support discovering the end of the buffer,
179 	   but also allow SEEK_SET to maintain the idiomatic
180 	   SEEK_END(0), SEEK_CUR(0) pattern */
181 	if (whence == SEEK_END)
182 		base = dmabuf->size;
183 	else if (whence == SEEK_SET)
184 		base = 0;
185 	else
186 		return -EINVAL;
187 
188 	if (offset != 0)
189 		return -EINVAL;
190 
191 	return base + offset;
192 }
193 
194 /**
195  * DOC: implicit fence polling
196  *
197  * To support cross-device and cross-driver synchronization of buffer access,
198  * implicit fences (represented internally in the kernel with &struct dma_fence)
199  * can be attached to a &dma_buf. The glue for that and a few related things are
200  * provided in the &dma_resv structure.
201  *
202  * Userspace can query the state of these implicitly tracked fences using poll()
203  * and related system calls:
204  *
205  * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
206  *   most recent write or exclusive fence.
207  *
208  * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
209  *   all attached fences, shared and exclusive ones.
210  *
211  * Note that this only signals the completion of the respective fences, i.e. the
212  * DMA transfers are complete. Cache flushing and any other necessary
213  * preparations before CPU access can begin still need to happen.
214  *
215  * As an alternative to poll(), the set of fences on DMA buffer can be
216  * exported as a &sync_file using &dma_buf_sync_file_export.
217  */
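
/*
 * Example (illustrative sketch, not part of this file): a userspace helper
 * that waits for all pending device access on a dma-buf by polling for
 * EPOLLOUT as described above. "dmabuf_fd" is a hypothetical descriptor
 * obtained from an exporting driver; error handling is abbreviated.
 */
#include <poll.h>

static int wait_for_dmabuf_idle(int dmabuf_fd)
{
	struct pollfd pfd = {
		.fd = dmabuf_fd,
		.events = POLLOUT,	/* all fences, shared and exclusive */
	};

	/* Blocks until every fence attached to the buffer has signalled. */
	return poll(&pfd, 1, -1) == 1 ? 0 : -1;
}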
218 
219 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
220 {
221 	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
222 	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
223 	unsigned long flags;
224 
225 	spin_lock_irqsave(&dcb->poll->lock, flags);
226 	wake_up_locked_poll(dcb->poll, dcb->active);
227 	dcb->active = 0;
228 	spin_unlock_irqrestore(&dcb->poll->lock, flags);
229 	dma_fence_put(fence);
230 	/* Paired with get_file in dma_buf_poll */
231 	fput(dmabuf->file);
232 }
233 
234 static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
235 				struct dma_buf_poll_cb_t *dcb)
236 {
237 	struct dma_resv_iter cursor;
238 	struct dma_fence *fence;
239 	int r;
240 
241 	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
242 				fence) {
243 		dma_fence_get(fence);
244 		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
245 		if (!r)
246 			return true;
247 		dma_fence_put(fence);
248 	}
249 
250 	return false;
251 }
252 
253 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
254 {
255 	struct dma_buf *dmabuf;
256 	struct dma_resv *resv;
257 	__poll_t events;
258 
259 	dmabuf = file->private_data;
260 	if (!dmabuf || !dmabuf->resv)
261 		return EPOLLERR;
262 
263 	resv = dmabuf->resv;
264 
265 	poll_wait(file, &dmabuf->poll, poll);
266 
267 	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
268 	if (!events)
269 		return 0;
270 
271 	dma_resv_lock(resv, NULL);
272 
273 	if (events & EPOLLOUT) {
274 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
275 
276 		/* Check that callback isn't busy */
277 		spin_lock_irq(&dmabuf->poll.lock);
278 		if (dcb->active)
279 			events &= ~EPOLLOUT;
280 		else
281 			dcb->active = EPOLLOUT;
282 		spin_unlock_irq(&dmabuf->poll.lock);
283 
284 		if (events & EPOLLOUT) {
285 			/* Paired with fput in dma_buf_poll_cb */
286 			get_file(dmabuf->file);
287 
288 			if (!dma_buf_poll_add_cb(resv, true, dcb))
289 				/* No callback queued, wake up any other waiters */
290 				dma_buf_poll_cb(NULL, &dcb->cb);
291 			else
292 				events &= ~EPOLLOUT;
293 		}
294 	}
295 
296 	if (events & EPOLLIN) {
297 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
298 
299 		/* Check that callback isn't busy */
300 		spin_lock_irq(&dmabuf->poll.lock);
301 		if (dcb->active)
302 			events &= ~EPOLLIN;
303 		else
304 			dcb->active = EPOLLIN;
305 		spin_unlock_irq(&dmabuf->poll.lock);
306 
307 		if (events & EPOLLIN) {
308 			/* Paired with fput in dma_buf_poll_cb */
309 			get_file(dmabuf->file);
310 
311 			if (!dma_buf_poll_add_cb(resv, false, dcb))
312 				/* No callback queued, wake up any other waiters */
313 				dma_buf_poll_cb(NULL, &dcb->cb);
314 			else
315 				events &= ~EPOLLIN;
316 		}
317 	}
318 
319 	dma_resv_unlock(resv);
320 	return events;
321 }
322 
323 /**
324  * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
325  * It could support changing the name of the dma-buf if the same
326  * piece of memory is used for multiple purposes by different devices.
327  *
328  * @dmabuf: [in]     dmabuf buffer that will be renamed.
329  * @buf:    [in]     A piece of userspace memory that contains the name of
330  *                   the dma-buf.
331  *
332  * Returns 0 on success, or a negative error code if the name cannot be
333  * copied from userspace.
334  *
335  */
336 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
337 {
338 	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
339 
340 	if (IS_ERR(name))
341 		return PTR_ERR(name);
342 
343 	spin_lock(&dmabuf->name_lock);
344 	kfree(dmabuf->name);
345 	dmabuf->name = name;
346 	spin_unlock(&dmabuf->name_lock);
347 
348 	return 0;
349 }
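
/*
 * Example (illustrative sketch, not part of this file): naming a dma-buf from
 * userspace through the DMA_BUF_SET_NAME ioctl, which is dispatched to the
 * handler above from dma_buf_ioctl() further down. "dmabuf_fd" is hypothetical.
 */
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int name_dmabuf(int dmabuf_fd, const char *name)
{
	/* The kernel copies at most DMA_BUF_NAME_LEN bytes with strndup_user(). */
	return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
}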
350 
351 #if IS_ENABLED(CONFIG_SYNC_FILE)
352 static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
353 				     void __user *user_data)
354 {
355 	struct dma_buf_export_sync_file arg;
356 	enum dma_resv_usage usage;
357 	struct dma_fence *fence = NULL;
358 	struct sync_file *sync_file;
359 	int fd, ret;
360 
361 	if (copy_from_user(&arg, user_data, sizeof(arg)))
362 		return -EFAULT;
363 
364 	if (arg.flags & ~DMA_BUF_SYNC_RW)
365 		return -EINVAL;
366 
367 	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
368 		return -EINVAL;
369 
370 	fd = get_unused_fd_flags(O_CLOEXEC);
371 	if (fd < 0)
372 		return fd;
373 
374 	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
375 	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
376 	if (ret)
377 		goto err_put_fd;
378 
379 	if (!fence)
380 		fence = dma_fence_get_stub();
381 
382 	sync_file = sync_file_create(fence);
383 
384 	dma_fence_put(fence);
385 
386 	if (!sync_file) {
387 		ret = -ENOMEM;
388 		goto err_put_fd;
389 	}
390 
391 	arg.fd = fd;
392 	if (copy_to_user(user_data, &arg, sizeof(arg))) {
393 		ret = -EFAULT;
394 		goto err_put_file;
395 	}
396 
397 	fd_install(fd, sync_file->file);
398 
399 	return 0;
400 
401 err_put_file:
402 	fput(sync_file->file);
403 err_put_fd:
404 	put_unused_fd(fd);
405 	return ret;
406 }
407 
408 static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
409 				     const void __user *user_data)
410 {
411 	struct dma_buf_import_sync_file arg;
412 	struct dma_fence *fence, *f;
413 	enum dma_resv_usage usage;
414 	struct dma_fence_unwrap iter;
415 	unsigned int num_fences;
416 	int ret = 0;
417 
418 	if (copy_from_user(&arg, user_data, sizeof(arg)))
419 		return -EFAULT;
420 
421 	if (arg.flags & ~DMA_BUF_SYNC_RW)
422 		return -EINVAL;
423 
424 	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
425 		return -EINVAL;
426 
427 	fence = sync_file_get_fence(arg.fd);
428 	if (!fence)
429 		return -EINVAL;
430 
431 	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
432 						   DMA_RESV_USAGE_READ;
433 
434 	num_fences = 0;
435 	dma_fence_unwrap_for_each(f, &iter, fence)
436 		++num_fences;
437 
438 	if (num_fences > 0) {
439 		dma_resv_lock(dmabuf->resv, NULL);
440 
441 		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
442 		if (!ret) {
443 			dma_fence_unwrap_for_each(f, &iter, fence)
444 				dma_resv_add_fence(dmabuf->resv, f, usage);
445 		}
446 
447 		dma_resv_unlock(dmabuf->resv);
448 	}
449 
450 	dma_fence_put(fence);
451 
452 	return ret;
453 }
454 #endif
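
/*
 * Example (illustrative sketch, not part of this file): userspace collecting
 * the fences a reader would have to wait for into a sync_file fd via
 * DMA_BUF_IOCTL_EXPORT_SYNC_FILE, handled above. "dmabuf_fd" is hypothetical;
 * error handling is abbreviated.
 */
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int export_read_fences(int dmabuf_fd)
{
	struct dma_buf_export_sync_file arg = {
		.flags = DMA_BUF_SYNC_READ,	/* fences blocking a new reader */
	};

	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg))
		return -1;

	return arg.fd;	/* sync_file fd, can be waited on or imported elsewhere */
}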
455 
456 static long dma_buf_ioctl(struct file *file,
457 			  unsigned int cmd, unsigned long arg)
458 {
459 	struct dma_buf *dmabuf;
460 	struct dma_buf_sync sync;
461 	enum dma_data_direction direction;
462 	int ret;
463 
464 	dmabuf = file->private_data;
465 
466 	switch (cmd) {
467 	case DMA_BUF_IOCTL_SYNC:
468 		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
469 			return -EFAULT;
470 
471 		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
472 			return -EINVAL;
473 
474 		switch (sync.flags & DMA_BUF_SYNC_RW) {
475 		case DMA_BUF_SYNC_READ:
476 			direction = DMA_FROM_DEVICE;
477 			break;
478 		case DMA_BUF_SYNC_WRITE:
479 			direction = DMA_TO_DEVICE;
480 			break;
481 		case DMA_BUF_SYNC_RW:
482 			direction = DMA_BIDIRECTIONAL;
483 			break;
484 		default:
485 			return -EINVAL;
486 		}
487 
488 		if (sync.flags & DMA_BUF_SYNC_END)
489 			ret = dma_buf_end_cpu_access(dmabuf, direction);
490 		else
491 			ret = dma_buf_begin_cpu_access(dmabuf, direction);
492 
493 		return ret;
494 
495 	case DMA_BUF_SET_NAME_A:
496 	case DMA_BUF_SET_NAME_B:
497 		return dma_buf_set_name(dmabuf, (const char __user *)arg);
498 
499 #if IS_ENABLED(CONFIG_SYNC_FILE)
500 	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
501 		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
502 	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
503 		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
504 #endif
505 
506 	default:
507 		return -ENOTTY;
508 	}
509 }
510 
511 static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
512 {
513 	struct dma_buf *dmabuf = file->private_data;
514 
515 	seq_printf(m, "size:\t%zu\n", dmabuf->size);
516 	/* Don't count the temporary reference taken inside procfs seq_show */
517 	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
518 	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
519 	spin_lock(&dmabuf->name_lock);
520 	if (dmabuf->name)
521 		seq_printf(m, "name:\t%s\n", dmabuf->name);
522 	spin_unlock(&dmabuf->name_lock);
523 }
524 
525 static const struct file_operations dma_buf_fops = {
526 	.release	= dma_buf_file_release,
527 	.mmap		= dma_buf_mmap_internal,
528 	.llseek		= dma_buf_llseek,
529 	.poll		= dma_buf_poll,
530 	.unlocked_ioctl	= dma_buf_ioctl,
531 	.compat_ioctl	= compat_ptr_ioctl,
532 	.show_fdinfo	= dma_buf_show_fdinfo,
533 };
534 
535 /*
536  * is_dma_buf_file - Check if struct file* is associated with dma_buf
537  */
538 static inline int is_dma_buf_file(struct file *file)
539 {
540 	return file->f_op == &dma_buf_fops;
541 }
542 
543 static struct file *dma_buf_getfile(size_t size, int flags)
544 {
545 	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
546 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
547 	struct file *file;
548 
549 	if (IS_ERR(inode))
550 		return ERR_CAST(inode);
551 
552 	inode->i_size = size;
553 	inode_set_bytes(inode, size);
554 
555 	/*
556 	 * The ->i_ino acquired from get_next_ino() is not unique and thus
557 	 * not suitable for use as the dentry name by dmabuf stats.
558 	 * Override ->i_ino with a unique, dmabuffs-specific
559 	 * value.
560 	 */
561 	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
562 	flags &= O_ACCMODE | O_NONBLOCK;
563 	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
564 				 flags, &dma_buf_fops);
565 	if (IS_ERR(file))
566 		goto err_alloc_file;
567 
568 	return file;
569 
570 err_alloc_file:
571 	iput(inode);
572 	return file;
573 }
574 
575 /**
576  * DOC: dma buf device access
577  *
578  * For device DMA access to a shared DMA buffer the usual sequence of operations
579  * is fairly simple:
580  *
581  * 1. The exporter defines its exporter instance using
582  *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
583  *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
584  *    as a file descriptor by calling dma_buf_fd().
585  *
586  * 2. Userspace passes this file descriptor to all drivers it wants this buffer
587  *    to be shared with: first the file descriptor is converted to a &dma_buf using
588  *    dma_buf_get(). Then the buffer is attached to the device using
589  *    dma_buf_attach().
590  *
591  *    Up to this stage the exporter is still free to migrate or reallocate the
592  *    backing storage.
593  *
594  * 3. Once the buffer is attached to all devices userspace can initiate DMA
595  *    access to the shared buffer. In the kernel this is done by calling
596  *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
597  *
598  * 4. Once a driver is done with a shared buffer it needs to call
599  *    dma_buf_detach() (after cleaning up any mappings) and then release the
600  *    reference acquired with dma_buf_get() by calling dma_buf_put().
601  *
602  * For the detailed semantics exporters are expected to implement see
603  * &dma_buf_ops.
604  */
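
/*
 * Example (illustrative sketch, not part of this file): step 1 of the sequence
 * above from the exporter's side. "struct my_buffer" and "my_dma_buf_ops" are
 * hypothetical driver objects; my_dma_buf_ops is assumed to provide at least
 * map_dma_buf, unmap_dma_buf and release. Error handling is abbreviated.
 */
struct my_buffer {
	size_t size;
};

static const struct dma_buf_ops my_dma_buf_ops;	/* assumed fully populated */

static int my_export_buffer(struct my_buffer *buf)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;

	exp_info.ops = &my_dma_buf_ops;
	exp_info.size = buf->size;
	exp_info.flags = O_RDWR | O_CLOEXEC;
	exp_info.priv = buf;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* Hand the buffer to userspace as a file descriptor. */
	return dma_buf_fd(dmabuf, O_CLOEXEC);
}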
605 
606 /**
607  * dma_buf_export - Creates a new dma_buf, and associates an anon file
608  * with this buffer, so it can be exported.
609  * Also connect the allocator specific data and ops to the buffer.
610  * Additionally, provide a name string for exporter; useful in debugging.
611  *
612  * @exp_info:	[in]	holds all the export related information provided
613  *			by the exporter. see &struct dma_buf_export_info
614  *			for further details.
615  *
616  * Returns, on success, a newly created struct dma_buf object, which wraps the
617  * supplied private data and operations for struct dma_buf_ops. On missing
618  * ops or an error allocating the struct dma_buf, a negative error pointer
619  * is returned.
620  *
621  * For most cases the easiest way to create @exp_info is through the
622  * %DEFINE_DMA_BUF_EXPORT_INFO macro.
623  */
624 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
625 {
626 	struct dma_buf *dmabuf;
627 	struct dma_resv *resv = exp_info->resv;
628 	struct file *file;
629 	size_t alloc_size = sizeof(struct dma_buf);
630 	int ret;
631 
632 	if (WARN_ON(!exp_info->priv || !exp_info->ops
633 		    || !exp_info->ops->map_dma_buf
634 		    || !exp_info->ops->unmap_dma_buf
635 		    || !exp_info->ops->release))
636 		return ERR_PTR(-EINVAL);
637 
638 	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
639 		    (exp_info->ops->pin || exp_info->ops->unpin)))
640 		return ERR_PTR(-EINVAL);
641 
642 	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
643 		return ERR_PTR(-EINVAL);
644 
645 	if (!try_module_get(exp_info->owner))
646 		return ERR_PTR(-ENOENT);
647 
648 	file = dma_buf_getfile(exp_info->size, exp_info->flags);
649 	if (IS_ERR(file)) {
650 		ret = PTR_ERR(file);
651 		goto err_module;
652 	}
653 
654 	if (!exp_info->resv)
655 		alloc_size += sizeof(struct dma_resv);
656 	else
657 		/* prevent &dma_buf[1] == dma_buf->resv */
658 		alloc_size += 1;
659 	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
660 	if (!dmabuf) {
661 		ret = -ENOMEM;
662 		goto err_file;
663 	}
664 
665 	dmabuf->priv = exp_info->priv;
666 	dmabuf->ops = exp_info->ops;
667 	dmabuf->size = exp_info->size;
668 	dmabuf->exp_name = exp_info->exp_name;
669 	dmabuf->owner = exp_info->owner;
670 	spin_lock_init(&dmabuf->name_lock);
671 	init_waitqueue_head(&dmabuf->poll);
672 	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
673 	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
674 	INIT_LIST_HEAD(&dmabuf->attachments);
675 
676 	if (!resv) {
677 		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
678 		dma_resv_init(dmabuf->resv);
679 	} else {
680 		dmabuf->resv = resv;
681 	}
682 
683 	ret = dma_buf_stats_setup(dmabuf, file);
684 	if (ret)
685 		goto err_dmabuf;
686 
687 	file->private_data = dmabuf;
688 	file->f_path.dentry->d_fsdata = dmabuf;
689 	dmabuf->file = file;
690 
691 	__dma_buf_debugfs_list_add(dmabuf);
692 
693 	return dmabuf;
694 
695 err_dmabuf:
696 	if (!resv)
697 		dma_resv_fini(dmabuf->resv);
698 	kfree(dmabuf);
699 err_file:
700 	fput(file);
701 err_module:
702 	module_put(exp_info->owner);
703 	return ERR_PTR(ret);
704 }
705 EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
706 
707 /**
708  * dma_buf_fd - returns a file descriptor for the given struct dma_buf
709  * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
710  * @flags:      [in]    flags to give to fd
711  *
712  * On success, returns an associated 'fd'. Otherwise, returns a negative error code.
713  */
714 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
715 {
716 	int fd;
717 
718 	if (!dmabuf || !dmabuf->file)
719 		return -EINVAL;
720 
721 	fd = get_unused_fd_flags(flags);
722 	if (fd < 0)
723 		return fd;
724 
725 	fd_install(fd, dmabuf->file);
726 
727 	return fd;
728 }
729 EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
730 
731 /**
732  * dma_buf_get - returns the struct dma_buf related to an fd
733  * @fd:	[in]	fd associated with the struct dma_buf to be returned
734  *
735  * On success, returns the struct dma_buf associated with an fd; uses
736  * file's refcounting done by fget to increase refcount. Returns ERR_PTR
737  * otherwise.
738  */
739 struct dma_buf *dma_buf_get(int fd)
740 {
741 	struct file *file;
742 
743 	file = fget(fd);
744 
745 	if (!file)
746 		return ERR_PTR(-EBADF);
747 
748 	if (!is_dma_buf_file(file)) {
749 		fput(file);
750 		return ERR_PTR(-EINVAL);
751 	}
752 
753 	return file->private_data;
754 }
755 EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
756 
757 /**
758  * dma_buf_put - decreases refcount of the buffer
759  * @dmabuf:	[in]	buffer to reduce refcount of
760  *
761  * Uses file's refcounting done implicitly by fput().
762  *
763  * If, as a result of this call, the refcount becomes 0, the 'release' file
764  * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
765  * in turn, and frees the memory allocated for dmabuf when exported.
766  */
767 void dma_buf_put(struct dma_buf *dmabuf)
768 {
769 	if (WARN_ON(!dmabuf || !dmabuf->file))
770 		return;
771 
772 	fput(dmabuf->file);
773 }
774 EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
775 
776 static void mangle_sg_table(struct sg_table *sg_table)
777 {
778 #ifdef CONFIG_DMABUF_DEBUG
779 	int i;
780 	struct scatterlist *sg;
781 
782 	/* To catch abuse of the underlying struct page by importers, mix
783 	 * up the bits, but take care to preserve the low SG_ bits to
784 	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
785 	 * before passing the sgt back to the exporter. */
786 	for_each_sgtable_sg(sg_table, sg, i)
787 		sg->page_link ^= ~0xffUL;
788 #endif
789 
790 }
791 static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
792 				       enum dma_data_direction direction)
793 {
794 	struct sg_table *sg_table;
795 	signed long ret;
796 
797 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
798 	if (IS_ERR_OR_NULL(sg_table))
799 		return sg_table;
800 
801 	if (!dma_buf_attachment_is_dynamic(attach)) {
802 		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
803 					    DMA_RESV_USAGE_KERNEL, true,
804 					    MAX_SCHEDULE_TIMEOUT);
805 		if (ret < 0) {
806 			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
807 							   direction);
808 			return ERR_PTR(ret);
809 		}
810 	}
811 
812 	mangle_sg_table(sg_table);
813 	return sg_table;
814 }
815 
816 /**
817  * DOC: locking convention
818  *
819  * In order to avoid deadlock situations between dma-buf exports and importers,
820  * all dma-buf API users must follow the common dma-buf locking convention.
821  *
822  * Convention for importers
823  *
824  * 1. Importers must hold the dma-buf reservation lock when calling these
825  *    functions:
826  *
827  *     - dma_buf_pin()
828  *     - dma_buf_unpin()
829  *     - dma_buf_map_attachment()
830  *     - dma_buf_unmap_attachment()
831  *     - dma_buf_vmap()
832  *     - dma_buf_vunmap()
833  *
834  * 2. Importers must not hold the dma-buf reservation lock when calling these
835  *    functions:
836  *
837  *     - dma_buf_attach()
838  *     - dma_buf_dynamic_attach()
839  *     - dma_buf_detach()
840  *     - dma_buf_export()
841  *     - dma_buf_fd()
842  *     - dma_buf_get()
843  *     - dma_buf_put()
844  *     - dma_buf_mmap()
845  *     - dma_buf_begin_cpu_access()
846  *     - dma_buf_end_cpu_access()
847  *     - dma_buf_map_attachment_unlocked()
848  *     - dma_buf_unmap_attachment_unlocked()
849  *     - dma_buf_vmap_unlocked()
850  *     - dma_buf_vunmap_unlocked()
851  *
852  * Convention for exporters
853  *
854  * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
855  *    reservation and exporter can take the lock:
856  *
857  *     - &dma_buf_ops.attach()
858  *     - &dma_buf_ops.detach()
859  *     - &dma_buf_ops.release()
860  *     - &dma_buf_ops.begin_cpu_access()
861  *     - &dma_buf_ops.end_cpu_access()
862  *     - &dma_buf_ops.mmap()
863  *
864  * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
865  *    reservation and exporter can't take the lock:
866  *
867  *     - &dma_buf_ops.pin()
868  *     - &dma_buf_ops.unpin()
869  *     - &dma_buf_ops.map_dma_buf()
870  *     - &dma_buf_ops.unmap_dma_buf()
871  *     - &dma_buf_ops.vmap()
872  *     - &dma_buf_ops.vunmap()
873  *
874  * 3. Exporters must hold the dma-buf reservation lock when calling these
875  *    functions:
876  *
877  *     - dma_buf_move_notify()
878  */
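
/*
 * Example (illustrative sketch, not part of this file): an importer following
 * rule 1 above by taking the reservation lock itself before calling
 * dma_buf_map_attachment(); this is equivalent to a single call to
 * dma_buf_map_attachment_unlocked(). "attach" is a hypothetical attachment.
 */
static struct sg_table *my_locked_map(struct dma_buf_attachment *attach)
{
	struct sg_table *sgt;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(attach->dmabuf->resv);

	return sgt;
}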
879 
880 /**
881  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
882  * @dmabuf:		[in]	buffer to attach device to.
883  * @dev:		[in]	device to be attached.
884  * @importer_ops:	[in]	importer operations for the attachment
885  * @importer_priv:	[in]	importer private pointer for the attachment
886  *
887  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
888  * must be cleaned up by calling dma_buf_detach().
889  *
890  * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
891  * functionality.
892  *
893  * Returns:
894  *
895  * A pointer to newly created &dma_buf_attachment on success, or a negative
896  * error code wrapped into a pointer on failure.
897  *
898  * Note that this can fail if the backing storage of @dmabuf is in a place not
899  * accessible to @dev, and cannot be moved to a more suitable place. This is
900  * indicated with the error code -EBUSY.
901  */
902 struct dma_buf_attachment *
903 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
904 		       const struct dma_buf_attach_ops *importer_ops,
905 		       void *importer_priv)
906 {
907 	struct dma_buf_attachment *attach;
908 	int ret;
909 
910 	if (WARN_ON(!dmabuf || !dev))
911 		return ERR_PTR(-EINVAL);
912 
913 	if (WARN_ON(importer_ops && !importer_ops->move_notify))
914 		return ERR_PTR(-EINVAL);
915 
916 	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
917 	if (!attach)
918 		return ERR_PTR(-ENOMEM);
919 
920 	attach->dev = dev;
921 	attach->dmabuf = dmabuf;
922 	if (importer_ops)
923 		attach->peer2peer = importer_ops->allow_peer2peer;
924 	attach->importer_ops = importer_ops;
925 	attach->importer_priv = importer_priv;
926 
927 	if (dmabuf->ops->attach) {
928 		ret = dmabuf->ops->attach(dmabuf, attach);
929 		if (ret)
930 			goto err_attach;
931 	}
932 	dma_resv_lock(dmabuf->resv, NULL);
933 	list_add(&attach->node, &dmabuf->attachments);
934 	dma_resv_unlock(dmabuf->resv);
935 
936 	/* When either the importer or the exporter can't handle dynamic
937 	 * mappings we cache the mapping here to avoid issues with the
938 	 * reservation object lock.
939 	 */
940 	if (dma_buf_attachment_is_dynamic(attach) !=
941 	    dma_buf_is_dynamic(dmabuf)) {
942 		struct sg_table *sgt;
943 
944 		dma_resv_lock(attach->dmabuf->resv, NULL);
945 		if (dma_buf_is_dynamic(attach->dmabuf)) {
946 			ret = dmabuf->ops->pin(attach);
947 			if (ret)
948 				goto err_unlock;
949 		}
950 
951 		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
952 		if (!sgt)
953 			sgt = ERR_PTR(-ENOMEM);
954 		if (IS_ERR(sgt)) {
955 			ret = PTR_ERR(sgt);
956 			goto err_unpin;
957 		}
958 		dma_resv_unlock(attach->dmabuf->resv);
959 		attach->sgt = sgt;
960 		attach->dir = DMA_BIDIRECTIONAL;
961 	}
962 
963 	return attach;
964 
965 err_attach:
966 	kfree(attach);
967 	return ERR_PTR(ret);
968 
969 err_unpin:
970 	if (dma_buf_is_dynamic(attach->dmabuf))
971 		dmabuf->ops->unpin(attach);
972 
973 err_unlock:
974 	dma_resv_unlock(attach->dmabuf->resv);
975 
976 	dma_buf_detach(dmabuf, attach);
977 	return ERR_PTR(ret);
978 }
979 EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
980 
981 /**
982  * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
983  * @dmabuf:	[in]	buffer to attach device to.
984  * @dev:	[in]	device to be attached.
985  *
986  * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
987  * mapping.
988  */
989 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
990 					  struct device *dev)
991 {
992 	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
993 }
994 EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
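
/*
 * Example (illustrative sketch, not part of this file): the importer-side
 * sequence from the "dma buf device access" DOC above, for a driver using
 * static mappings. "fd" comes from userspace and "dev" is the importing
 * device; both are hypothetical, and error unwinding is abbreviated.
 */
static struct sg_table *my_import(struct device *dev, int fd,
				  struct dma_buf_attachment **out_attach)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return ERR_CAST(attach);
	}

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return sgt;
	}

	*out_attach = attach;
	return sgt;	/* unmap, detach and dma_buf_put() when done */
}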
995 
996 static void __unmap_dma_buf(struct dma_buf_attachment *attach,
997 			    struct sg_table *sg_table,
998 			    enum dma_data_direction direction)
999 {
1000 	/* uses XOR, hence this unmangles */
1001 	mangle_sg_table(sg_table);
1002 
1003 	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
1004 }
1005 
1006 /**
1007  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
1008  * @dmabuf:	[in]	buffer to detach from.
1009  * @attach:	[in]	attachment to be detached; is free'd after this call.
1010  *
1011  * Clean up a device attachment obtained by calling dma_buf_attach().
1012  *
1013  * Optionally this calls &dma_buf_ops.detach for device-specific detach.
1014  */
1015 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
1016 {
1017 	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
1018 		return;
1019 
1020 	dma_resv_lock(dmabuf->resv, NULL);
1021 
1022 	if (attach->sgt) {
1023 
1024 		__unmap_dma_buf(attach, attach->sgt, attach->dir);
1025 
1026 		if (dma_buf_is_dynamic(attach->dmabuf))
1027 			dmabuf->ops->unpin(attach);
1028 	}
1029 	list_del(&attach->node);
1030 
1031 	dma_resv_unlock(dmabuf->resv);
1032 
1033 	if (dmabuf->ops->detach)
1034 		dmabuf->ops->detach(dmabuf, attach);
1035 
1036 	kfree(attach);
1037 }
1038 EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
1039 
1040 /**
1041  * dma_buf_pin - Lock down the DMA-buf
1042  * @attach:	[in]	attachment which should be pinned
1043  *
1044  * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
1045  * call this, and only for limited use cases like scanout and not for temporary
1046  * pin operations. It is not permitted to allow userspace to pin arbitrary
1047  * amounts of buffers through this interface.
1048  *
1049  * Buffers must be unpinned by calling dma_buf_unpin().
1050  *
1051  * Returns:
1052  * 0 on success, negative error code on failure.
1053  */
1054 int dma_buf_pin(struct dma_buf_attachment *attach)
1055 {
1056 	struct dma_buf *dmabuf = attach->dmabuf;
1057 	int ret = 0;
1058 
1059 	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1060 
1061 	dma_resv_assert_held(dmabuf->resv);
1062 
1063 	if (dmabuf->ops->pin)
1064 		ret = dmabuf->ops->pin(attach);
1065 
1066 	return ret;
1067 }
1068 EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
1069 
1070 /**
1071  * dma_buf_unpin - Unpin a DMA-buf
1072  * @attach:	[in]	attachment which should be unpinned
1073  *
1074  * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
1075  * any mapping of @attach again and inform the importer through
1076  * &dma_buf_attach_ops.move_notify.
1077  */
1078 void dma_buf_unpin(struct dma_buf_attachment *attach)
1079 {
1080 	struct dma_buf *dmabuf = attach->dmabuf;
1081 
1082 	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1083 
1084 	dma_resv_assert_held(dmabuf->resv);
1085 
1086 	if (dmabuf->ops->unpin)
1087 		dmabuf->ops->unpin(attach);
1088 }
1089 EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
1090 
1091 /**
1092  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
1093  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1094  * dma_buf_ops.
1095  * @attach:	[in]	attachment whose scatterlist is to be returned
1096  * @direction:	[in]	direction of DMA transfer
1097  *
1098  * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
1099  * on error. May return -EINTR if it is interrupted by a signal.
1100  *
1101  * On success, the DMA addresses and lengths in the returned scatterlist are
1102  * PAGE_SIZE aligned.
1103  *
1104  * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
1105  * the underlying backing storage is pinned for as long as a mapping exists,
1106  * therefore users/importers should not hold onto a mapping for undue amounts of
1107  * time.
1108  *
1109  * Important: Dynamic importers must wait for the exclusive fence of the struct
1110  * dma_resv attached to the DMA-BUF first.
1111  */
1112 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1113 					enum dma_data_direction direction)
1114 {
1115 	struct sg_table *sg_table;
1116 	int r;
1117 
1118 	might_sleep();
1119 
1120 	if (WARN_ON(!attach || !attach->dmabuf))
1121 		return ERR_PTR(-EINVAL);
1122 
1123 	dma_resv_assert_held(attach->dmabuf->resv);
1124 
1125 	if (attach->sgt) {
1126 		/*
1127 		 * Two mappings with different directions for the same
1128 		 * attachment are not allowed.
1129 		 */
1130 		if (attach->dir != direction &&
1131 		    attach->dir != DMA_BIDIRECTIONAL)
1132 			return ERR_PTR(-EBUSY);
1133 
1134 		return attach->sgt;
1135 	}
1136 
1137 	if (dma_buf_is_dynamic(attach->dmabuf)) {
1138 		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1139 			r = attach->dmabuf->ops->pin(attach);
1140 			if (r)
1141 				return ERR_PTR(r);
1142 		}
1143 	}
1144 
1145 	sg_table = __map_dma_buf(attach, direction);
1146 	if (!sg_table)
1147 		sg_table = ERR_PTR(-ENOMEM);
1148 
1149 	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
1150 	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1151 		attach->dmabuf->ops->unpin(attach);
1152 
1153 	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1154 		attach->sgt = sg_table;
1155 		attach->dir = direction;
1156 	}
1157 
1158 #ifdef CONFIG_DMA_API_DEBUG
1159 	if (!IS_ERR(sg_table)) {
1160 		struct scatterlist *sg;
1161 		u64 addr;
1162 		int len;
1163 		int i;
1164 
1165 		for_each_sgtable_dma_sg(sg_table, sg, i) {
1166 			addr = sg_dma_address(sg);
1167 			len = sg_dma_len(sg);
1168 			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1169 				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1170 					 __func__, addr, len);
1171 			}
1172 		}
1173 	}
1174 #endif /* CONFIG_DMA_API_DEBUG */
1175 	return sg_table;
1176 }
1177 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
1178 
1179 /**
1180  * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
1181  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1182  * dma_buf_ops.
1183  * @attach:	[in]	attachment whose scatterlist is to be returned
1184  * @direction:	[in]	direction of DMA transfer
1185  *
1186  * Unlocked variant of dma_buf_map_attachment().
1187  */
1188 struct sg_table *
1189 dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
1190 				enum dma_data_direction direction)
1191 {
1192 	struct sg_table *sg_table;
1193 
1194 	might_sleep();
1195 
1196 	if (WARN_ON(!attach || !attach->dmabuf))
1197 		return ERR_PTR(-EINVAL);
1198 
1199 	dma_resv_lock(attach->dmabuf->resv, NULL);
1200 	sg_table = dma_buf_map_attachment(attach, direction);
1201 	dma_resv_unlock(attach->dmabuf->resv);
1202 
1203 	return sg_table;
1204 }
1205 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
1206 
1207 /**
1208  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1209  * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1210  * dma_buf_ops.
1211  * @attach:	[in]	attachment to unmap buffer from
1212  * @sg_table:	[in]	scatterlist info of the buffer to unmap
1213  * @direction:  [in]    direction of DMA transfer
1214  *
1215  * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1216  */
1217 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1218 				struct sg_table *sg_table,
1219 				enum dma_data_direction direction)
1220 {
1221 	might_sleep();
1222 
1223 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1224 		return;
1225 
1226 	dma_resv_assert_held(attach->dmabuf->resv);
1227 
1228 	if (attach->sgt == sg_table)
1229 		return;
1230 
1231 	__unmap_dma_buf(attach, sg_table, direction);
1232 
1233 	if (dma_buf_is_dynamic(attach->dmabuf) &&
1234 	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1235 		dma_buf_unpin(attach);
1236 }
1237 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
1238 
1239 /**
1240  * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
1241  * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1242  * dma_buf_ops.
1243  * @attach:	[in]	attachment to unmap buffer from
1244  * @sg_table:	[in]	scatterlist info of the buffer to unmap
1245  * @direction:	[in]	direction of DMA transfer
1246  *
1247  * Unlocked variant of dma_buf_unmap_attachment().
1248  */
1249 void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
1250 				       struct sg_table *sg_table,
1251 				       enum dma_data_direction direction)
1252 {
1253 	might_sleep();
1254 
1255 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1256 		return;
1257 
1258 	dma_resv_lock(attach->dmabuf->resv, NULL);
1259 	dma_buf_unmap_attachment(attach, sg_table, direction);
1260 	dma_resv_unlock(attach->dmabuf->resv);
1261 }
1262 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
1263 
1264 /**
1265  * dma_buf_move_notify - notify attachments that DMA-buf is moving
1266  *
1267  * @dmabuf:	[in]	buffer which is moving
1268  *
1269  * Informs all attachments that they need to destroy and recreate all their
1270  * mappings.
1271  */
1272 void dma_buf_move_notify(struct dma_buf *dmabuf)
1273 {
1274 	struct dma_buf_attachment *attach;
1275 
1276 	dma_resv_assert_held(dmabuf->resv);
1277 
1278 	list_for_each_entry(attach, &dmabuf->attachments, node)
1279 		if (attach->importer_ops)
1280 			attach->importer_ops->move_notify(attach);
1281 }
1282 EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
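
/*
 * Example (illustrative sketch, not part of this file): an exporter informing
 * importers before it migrates the backing storage, per rule 3 of the locking
 * convention above. "my_migrate" is a hypothetical helper that performs the
 * actual move.
 */
static int my_migrate(void *priv);	/* hypothetical */

static int my_evict_buffer(struct dma_buf *dmabuf)
{
	int ret;

	dma_resv_lock(dmabuf->resv, NULL);
	/* Dynamic importers drop their cached mappings in ->move_notify(). */
	dma_buf_move_notify(dmabuf);
	ret = my_migrate(dmabuf->priv);
	dma_resv_unlock(dmabuf->resv);

	return ret;
}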
1283 
1284 /**
1285  * DOC: cpu access
1286  *
1287  * There are multiple reasons for supporting CPU access to a dma buffer object:
1288  *
1289  * - Fallback operations in the kernel, for example when a device is connected
1290  *   over USB and the kernel needs to shuffle the data around first before
1291  *   sending it away. Cache coherency is handled by bracketing any transactions
1292  *   with calls to dma_buf_begin_cpu_access() and
1293  *   dma_buf_end_cpu_access().
1294  *
1295  *   Since most kernel-internal dma-buf accesses need the entire buffer, a
1296  *   vmap interface is introduced. Note that on very old 32-bit architectures
1297  *   vmalloc space might be limited and result in vmap calls failing.
1298  *
1299  *   Interfaces::
1300  *
1301  *      int dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1302  *      void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1303  *
1304  *   The vmap call can fail if there is no vmap support in the exporter, or if
1305  *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1306  *   count for all vmap access and calls down into the exporter's vmap function
1307  *   only when no vmapping exists, and only unmaps it once. Protection against
1308  *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
1309  *
1310  * - For full compatibility on the importer side with existing userspace
1311  *   interfaces, which might already support mmap'ing buffers. This is needed in
1312  *   many processing pipelines (e.g. feeding a software rendered image into a
1313  *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1314  *   framework already supported this, and mmap support was needed for DMA
1315  *   buffer file descriptors to replace ION buffers.
1316  *
1317  *   There are no special interfaces; userspace simply calls mmap on the dma-buf
1318  *   fd. But like for CPU access there's a need to bracket the actual access,
1319  *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1320  *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1321  *   be restarted.
1322  *
1323  *   Some systems might need some sort of cache coherency management, e.g. when
1324  *   CPU and GPU domains are being accessed through dma-buf at the same time.
1325  *   To circumvent this problem there are begin/end coherency markers that
1326  *   forward directly to the existing dma-buf device drivers' vfunc hooks. Userspace
1327  *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1328  *   sequence would be used like the following:
1329  *
1330  *     - mmap dma-buf fd
1331  *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
1332  *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
1333  *       want (with the new data being consumed by, say, the GPU or the scanout
1334  *       device)
1335  *     - munmap once you don't need the buffer any more
1336  *
1337  *    For correctness and optimal performance, it is always required to use
1338  *    SYNC_START and SYNC_END before and after, respectively, when accessing the
1339  *    mapped address. Userspace cannot rely on coherent access, even when there
1340  *    are systems where it just works without calling these ioctls.
1341  *
1342  * - And as a CPU fallback in userspace processing pipelines.
1343  *
1344  *   Similar to the motivation for kernel cpu access it is again important that
1345  *   the userspace code of a given importing subsystem can use the same
1346  *   interfaces with an imported dma-buf buffer object as with a native buffer
1347  *   object. This is especially important for drm where the userspace part of
1348  *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
1349  *   use a different way to mmap a buffer is rather invasive.
1350  *
1351  *   The assumption in the current dma-buf interfaces is that redirecting the
1352  *   initial mmap is all that's needed. A survey of some of the existing
1353  *   subsystems shows that no driver seems to do any nefarious thing like
1354  *   syncing up with outstanding asynchronous processing on the device or
1355  *   allocating special resources at fault time. So hopefully this is good
1356  *   enough, since adding interfaces to intercept pagefaults and allow pte
1357  *   shootdowns would increase the complexity quite a bit.
1358  *
1359  *   Interface::
1360  *
1361  *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1362  *		       unsigned long);
1363  *
1364  *   If the importing subsystem simply provides a special-purpose mmap call to
1365  *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1366  *   equally achieve that for a dma-buf object.
1367  */
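
/*
 * Example (illustrative sketch, not part of this file): the userspace
 * mmap + DMA_BUF_IOCTL_SYNC sequence described above. "dmabuf_fd" and "size"
 * are hypothetical; real code should also handle ioctl errors other than
 * EINTR/EAGAIN.
 */
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static int dmabuf_sync(int fd, __u64 flags)
{
	struct dma_buf_sync sync = { .flags = flags };
	int ret;

	/* DMA_BUF_IOCTL_SYNC may fail with EINTR or EAGAIN and must be retried. */
	do {
		ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
	} while (ret && (errno == EINTR || errno == EAGAIN));

	return ret;
}

static int cpu_fill(int dmabuf_fd, size_t size)
{
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 dmabuf_fd, 0);

	if (ptr == MAP_FAILED)
		return -1;

	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE);
	memset(ptr, 0, size);	/* CPU writes to the shared buffer */
	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE);

	munmap(ptr, size);
	return 0;
}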
1368 
1369 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1370 				      enum dma_data_direction direction)
1371 {
1372 	bool write = (direction == DMA_BIDIRECTIONAL ||
1373 		      direction == DMA_TO_DEVICE);
1374 	struct dma_resv *resv = dmabuf->resv;
1375 	long ret;
1376 
1377 	/* Wait on any implicit rendering fences */
1378 	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1379 				    true, MAX_SCHEDULE_TIMEOUT);
1380 	if (ret < 0)
1381 		return ret;
1382 
1383 	return 0;
1384 }
1385 
1386 /**
1387  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1388  * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1389  * preparations. Coherency is only guaranteed in the specified range for the
1390  * specified access direction.
1391  * @dmabuf:	[in]	buffer to prepare cpu access for.
1392  * @direction:	[in]	direction of access.
1393  *
1394  * After the cpu access is complete the caller should call
1395  * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1396  * it guaranteed to be coherent with other DMA access.
1397  *
1398  * This function will also wait for any DMA transactions tracked through
1399  * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1400  * synchronization this function will only ensure cache coherency, callers must
1401  * ensure synchronization with such DMA transactions on their own.
1402  *
1403  * Can return negative error values, returns 0 on success.
1404  */
1405 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1406 			     enum dma_data_direction direction)
1407 {
1408 	int ret = 0;
1409 
1410 	if (WARN_ON(!dmabuf))
1411 		return -EINVAL;
1412 
1413 	might_lock(&dmabuf->resv->lock.base);
1414 
1415 	if (dmabuf->ops->begin_cpu_access)
1416 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1417 
1418 	/* Ensure that all fences are waited upon - but we first allow
1419 	 * the native handler the chance to do so more efficiently if it
1420 	 * chooses. A double invocation here will be a reasonably cheap no-op.
1421 	 */
1422 	if (ret == 0)
1423 		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1424 
1425 	return ret;
1426 }
1427 EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
1428 
1429 /**
1430  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1431  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1432  * actions. Coherency is only guaranteed in the specified range for the
1433  * specified access direction.
1434  * @dmabuf:	[in]	buffer to complete cpu access for.
1435  * @direction:	[in]	direction of access.
1436  *
1437  * This terminates CPU access started with dma_buf_begin_cpu_access().
1438  *
1439  * Can return negative error values, returns 0 on success.
1440  */
1441 int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1442 			   enum dma_data_direction direction)
1443 {
1444 	int ret = 0;
1445 
1446 	WARN_ON(!dmabuf);
1447 
1448 	might_lock(&dmabuf->resv->lock.base);
1449 
1450 	if (dmabuf->ops->end_cpu_access)
1451 		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1452 
1453 	return ret;
1454 }
1455 EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
1456 
1457 
1458 /**
1459  * dma_buf_mmap - Set up a userspace mmap with the given vma
1460  * @dmabuf:	[in]	buffer that should back the vma
1461  * @vma:	[in]	vma for the mmap
1462  * @pgoff:	[in]	offset in pages where this mmap should start within the
1463  *			dma-buf buffer.
1464  *
1465  * This function adjusts the passed in vma so that it points at the file of the
1466  * dma_buf operation. It also adjusts the starting pgoff and does bounds
1467  * checking on the size of the vma. Then it calls the exporter's mmap function to
1468  * set up the mapping.
1469  *
1470  * Can return negative error values, returns 0 on success.
1471  */
1472 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1473 		 unsigned long pgoff)
1474 {
1475 	if (WARN_ON(!dmabuf || !vma))
1476 		return -EINVAL;
1477 
1478 	/* check if buffer supports mmap */
1479 	if (!dmabuf->ops->mmap)
1480 		return -EINVAL;
1481 
1482 	/* check for offset overflow */
1483 	if (pgoff + vma_pages(vma) < pgoff)
1484 		return -EOVERFLOW;
1485 
1486 	/* check for overflowing the buffer's size */
1487 	if (pgoff + vma_pages(vma) >
1488 	    dmabuf->size >> PAGE_SHIFT)
1489 		return -EINVAL;
1490 
1491 	/* readjust the vma */
1492 	vma_set_file(vma, dmabuf->file);
1493 	vma->vm_pgoff = pgoff;
1494 
1495 	return dmabuf->ops->mmap(dmabuf, vma);
1496 }
1497 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
1498 
1499 /**
1500  * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1501  * address space. Same restrictions as for vmap and friends apply.
1502  * @dmabuf:	[in]	buffer to vmap
1503  * @map:	[out]	returns the vmap pointer
1504  *
1505  * This call may fail due to lack of virtual mapping address space.
1506  * These calls are optional in drivers. The intended use for them
1507  * is to map objects linearly into kernel space for frequently used objects.
1508  *
1509  * To ensure coherency users must call dma_buf_begin_cpu_access() and
1510  * dma_buf_end_cpu_access() around any cpu access performed through this
1511  * mapping.
1512  *
1513  * Returns 0 on success, or a negative errno code otherwise.
1514  */
1515 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1516 {
1517 	struct iosys_map ptr;
1518 	int ret;
1519 
1520 	iosys_map_clear(map);
1521 
1522 	if (WARN_ON(!dmabuf))
1523 		return -EINVAL;
1524 
1525 	dma_resv_assert_held(dmabuf->resv);
1526 
1527 	if (!dmabuf->ops->vmap)
1528 		return -EINVAL;
1529 
1530 	if (dmabuf->vmapping_counter) {
1531 		dmabuf->vmapping_counter++;
1532 		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1533 		*map = dmabuf->vmap_ptr;
1534 		return 0;
1535 	}
1536 
1537 	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
1538 
1539 	ret = dmabuf->ops->vmap(dmabuf, &ptr);
1540 	if (WARN_ON_ONCE(ret))
1541 		return ret;
1542 
1543 	dmabuf->vmap_ptr = ptr;
1544 	dmabuf->vmapping_counter = 1;
1545 
1546 	*map = dmabuf->vmap_ptr;
1547 
1548 	return 0;
1549 }
1550 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
1551 
1552 /**
1553  * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
1554  * address space. Same restrictions as for vmap and friends apply.
1555  * @dmabuf:	[in]	buffer to vmap
1556  * @map:	[out]	returns the vmap pointer
1557  *
1558  * Unlocked version of dma_buf_vmap()
1559  *
1560  * Returns 0 on success, or a negative errno code otherwise.
1561  */
1562 int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1563 {
1564 	int ret;
1565 
1566 	iosys_map_clear(map);
1567 
1568 	if (WARN_ON(!dmabuf))
1569 		return -EINVAL;
1570 
1571 	dma_resv_lock(dmabuf->resv, NULL);
1572 	ret = dma_buf_vmap(dmabuf, map);
1573 	dma_resv_unlock(dmabuf->resv);
1574 
1575 	return ret;
1576 }
1577 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
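
/*
 * Example (illustrative sketch, not part of this file): kernel CPU access to
 * an imported buffer, bracketed by dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() and mapped with dma_buf_vmap_unlocked() above.
 * "dmabuf" is a hypothetical imported buffer; error handling is abbreviated.
 */
static int my_cpu_clear(struct dma_buf *dmabuf)
{
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap_unlocked(dmabuf, &map);
	if (ret)
		return ret;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
	if (!ret) {
		/* CPU writes; works for both vaddr and I/O memory mappings. */
		iosys_map_memset(&map, 0, 0, dmabuf->size);
		ret = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
	}

	dma_buf_vunmap_unlocked(dmabuf, &map);
	return ret;
}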
1578 
1579 /**
1580  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1581  * @dmabuf:	[in]	buffer to vunmap
1582  * @map:	[in]	vmap pointer to vunmap
1583  */
1584 void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1585 {
1586 	if (WARN_ON(!dmabuf))
1587 		return;
1588 
1589 	dma_resv_assert_held(dmabuf->resv);
1590 
1591 	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1592 	BUG_ON(dmabuf->vmapping_counter == 0);
1593 	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
1594 
1595 	if (--dmabuf->vmapping_counter == 0) {
1596 		if (dmabuf->ops->vunmap)
1597 			dmabuf->ops->vunmap(dmabuf, map);
1598 		iosys_map_clear(&dmabuf->vmap_ptr);
1599 	}
1600 }
1601 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
1602 
1603 /**
1604  * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
1605  * @dmabuf:	[in]	buffer to vunmap
1606  * @map:	[in]	vmap pointer to vunmap
1607  */
1608 void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1609 {
1610 	if (WARN_ON(!dmabuf))
1611 		return;
1612 
1613 	dma_resv_lock(dmabuf->resv, NULL);
1614 	dma_buf_vunmap(dmabuf, map);
1615 	dma_resv_unlock(dmabuf->resv);
1616 }
1617 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
1618 
1619 #ifdef CONFIG_DEBUG_FS
1620 static int dma_buf_debug_show(struct seq_file *s, void *unused)
1621 {
1622 	struct dma_buf *buf_obj;
1623 	struct dma_buf_attachment *attach_obj;
1624 	int count = 0, attach_count;
1625 	size_t size = 0;
1626 	int ret;
1627 
1628 	ret = mutex_lock_interruptible(&debugfs_list_mutex);
1629 
1630 	if (ret)
1631 		return ret;
1632 
1633 	seq_puts(s, "\nDma-buf Objects:\n");
1634 	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1635 		   "size", "flags", "mode", "count", "ino");
1636 
1637 	list_for_each_entry(buf_obj, &debugfs_list, list_node) {
1638 
1639 		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1640 		if (ret)
1641 			goto error_unlock;
1642 
1643 
1644 		spin_lock(&buf_obj->name_lock);
1645 		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1646 				buf_obj->size,
1647 				buf_obj->file->f_flags, buf_obj->file->f_mode,
1648 				file_count(buf_obj->file),
1649 				buf_obj->exp_name,
1650 				file_inode(buf_obj->file)->i_ino,
1651 				buf_obj->name ?: "<none>");
1652 		spin_unlock(&buf_obj->name_lock);
1653 
1654 		dma_resv_describe(buf_obj->resv, s);
1655 
1656 		seq_puts(s, "\tAttached Devices:\n");
1657 		attach_count = 0;
1658 
1659 		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1660 			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1661 			attach_count++;
1662 		}
1663 		dma_resv_unlock(buf_obj->resv);
1664 
1665 		seq_printf(s, "Total %d devices attached\n\n",
1666 				attach_count);
1667 
1668 		count++;
1669 		size += buf_obj->size;
1670 	}
1671 
1672 	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1673 
1674 	mutex_unlock(&debugfs_list_mutex);
1675 	return 0;
1676 
1677 error_unlock:
1678 	mutex_unlock(&debugfs_list_mutex);
1679 	return ret;
1680 }
1681 
1682 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1683 
1684 static struct dentry *dma_buf_debugfs_dir;
1685 
1686 static int dma_buf_init_debugfs(void)
1687 {
1688 	struct dentry *d;
1689 	int err = 0;
1690 
1691 	d = debugfs_create_dir("dma_buf", NULL);
1692 	if (IS_ERR(d))
1693 		return PTR_ERR(d);
1694 
1695 	dma_buf_debugfs_dir = d;
1696 
1697 	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1698 				NULL, &dma_buf_debug_fops);
1699 	if (IS_ERR(d)) {
1700 		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1701 		debugfs_remove_recursive(dma_buf_debugfs_dir);
1702 		dma_buf_debugfs_dir = NULL;
1703 		err = PTR_ERR(d);
1704 	}
1705 
1706 	return err;
1707 }
1708 
1709 static void dma_buf_uninit_debugfs(void)
1710 {
1711 	debugfs_remove_recursive(dma_buf_debugfs_dir);
1712 }
1713 #else
1714 static inline int dma_buf_init_debugfs(void)
1715 {
1716 	return 0;
1717 }
1718 static inline void dma_buf_uninit_debugfs(void)
1719 {
1720 }
1721 #endif
1722 
1723 static int __init dma_buf_init(void)
1724 {
1725 	int ret;
1726 
1727 	ret = dma_buf_init_sysfs_statistics();
1728 	if (ret)
1729 		return ret;
1730 
1731 	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1732 	if (IS_ERR(dma_buf_mnt))
1733 		return PTR_ERR(dma_buf_mnt);
1734 
1735 	dma_buf_init_debugfs();
1736 	return 0;
1737 }
1738 subsys_initcall(dma_buf_init);
1739 
1740 static void __exit dma_buf_deinit(void)
1741 {
1742 	dma_buf_uninit_debugfs();
1743 	kern_unmount(dma_buf_mnt);
1744 	dma_buf_uninit_sysfs_statistics();
1745 }
1746 __exitcall(dma_buf_deinit);
1747