// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to the linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

#if IS_ENABLED(CONFIG_DEBUG_FS)
static DEFINE_MUTEX(debugfs_list_mutex);
static LIST_HEAD(debugfs_list);

static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
{
	mutex_lock(&debugfs_list_mutex);
	list_add(&dmabuf->list_node, &debugfs_list);
	mutex_unlock(&debugfs_list_mutex);
}

static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
{
	if (!dmabuf)
		return;

	mutex_lock(&debugfs_list_mutex);
	list_del(&dmabuf->list_node);
	mutex_unlock(&debugfs_list_mutex);
}
#else
static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
{
}

static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
{
}
#endif

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	ssize_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strscpy(name, dmabuf->name, sizeof(name));
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
	 */
	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	if (!is_dma_buf_file(file))
		return -EINVAL;

	__dma_buf_debugfs_list_del(file->private_data);

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	 * but also allow SEEK_SET to maintain the idiomatic
	 * SEEK_END(0), SEEK_SET(0) pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 *
 * As an alternative to poll(), the set of fences on a DMA buffer can be
 * exported as a &sync_file using dma_buf_export_sync_file().
 */

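/*
 * A minimal userspace sketch of the polling described above; an untested
 * illustration only. It assumes a valid dma-buf fd obtained elsewhere and
 * waits until all attached fences (read and write) have signaled:
 *
 * .. code-block:: c
 *
 *     #include <poll.h>
 *
 *     // Returns 0 once the buffer is idle, -1 on poll() failure.
 *     static int wait_dmabuf_idle(int dmabuf_fd)
 *     {
 *             struct pollfd pfd = {
 *                     .fd = dmabuf_fd,
 *                     // POLLOUT corresponds to EPOLLOUT: wait for all fences.
 *                     .events = POLLOUT,
 *             };
 *
 *             // Block until every attached fence has signaled.
 *             return poll(&pfd, 1, -1) == 1 ? 0 : -1;
 *     }
 *
 * Remember that completion of the fences only means the DMA is done; cache
 * maintenance via DMA_BUF_IOCTL_SYNC may still be required before CPU access.
 */
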
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
	dma_fence_put(fence);
	/* Paired with get_file in dma_buf_poll */
	fput(dmabuf->file);
}

static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)
			return true;
		dma_fence_put(fence);
	}

	return false;
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	__poll_t events;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);

	if (events & EPOLLOUT) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLOUT;
		}
	}

	if (events & EPOLLIN) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLIN;
		}
	}

	dma_resv_unlock(resv);
	return events;
}

/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track usage.
 * The name of a dma-buf can be changed if the same piece of memory is
 * used for multiple purposes between different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success, or a negative error code if copying the name from
 * userspace fails.
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

	if (IS_ERR(name))
		return PTR_ERR(name);

	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_SYNC_FILE)
static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
				     void __user *user_data)
{
	struct dma_buf_export_sync_file arg;
	enum dma_resv_usage usage;
	struct dma_fence *fence = NULL;
	struct sync_file *sync_file;
	int fd, ret;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
	if (ret)
		goto err_put_fd;

	if (!fence)
		fence = dma_fence_get_stub();

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -ENOMEM;
		goto err_put_fd;
	}

	arg.fd = fd;
	if (copy_to_user(user_data, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto err_put_file;
	}

	fd_install(fd, sync_file->file);

	return 0;

err_put_file:
	fput(sync_file->file);
err_put_fd:
	put_unused_fd(fd);
	return ret;
}

static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
				     const void __user *user_data)
{
	struct dma_buf_import_sync_file arg;
	struct dma_fence *fence, *f;
	enum dma_resv_usage usage;
	struct dma_fence_unwrap iter;
	unsigned int num_fences;
	int ret = 0;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fence = sync_file_get_fence(arg.fd);
	if (!fence)
		return -EINVAL;

	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
						   DMA_RESV_USAGE_READ;

	num_fences = 0;
	dma_fence_unwrap_for_each(f, &iter, fence)
		++num_fences;

	if (num_fences > 0) {
		dma_resv_lock(dmabuf->resv, NULL);

		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
		if (!ret) {
			dma_fence_unwrap_for_each(f, &iter, fence)
				dma_resv_add_fence(dmabuf->resv, f, usage);
		}

		dma_resv_unlock(dmabuf->resv);
	}

	dma_fence_put(fence);

	return ret;
}
#endif

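/*
 * A hedged userspace sketch of driving the two ioctls above; illustration
 * only, with error handling trimmed. It snapshots the current read/write
 * fences of one dma-buf as a sync_file and attaches that fence set to a
 * second dma-buf as write fences (both fds are assumed to be valid dma-buf
 * fds):
 *
 * .. code-block:: c
 *
 *     #include <unistd.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/dma-buf.h>
 *
 *     static int transfer_fences(int src_dmabuf_fd, int dst_dmabuf_fd)
 *     {
 *             struct dma_buf_export_sync_file exp = {
 *                     .flags = DMA_BUF_SYNC_RW,	// snapshot readers and writers
 *             };
 *             struct dma_buf_import_sync_file imp;
 *             int ret;
 *
 *             if (ioctl(src_dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &exp))
 *                     return -1;
 *
 *             imp.flags = DMA_BUF_SYNC_WRITE;	// attach as write fences
 *             imp.fd = exp.fd;			// the freshly exported sync_file
 *             ret = ioctl(dst_dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &imp);
 *
 *             close(exp.fd);
 *             return ret;
 *     }
 */
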
static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

#if IS_ENABLED(CONFIG_SYNC_FILE)
	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
#endif

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(size_t size, int flags)
{
	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
	struct file *file;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = size;
	inode_set_bytes(inode, size);

	/*
	 * The ->i_ino acquired from get_next_ino() is not unique and thus
	 * not suitable for use as the dentry name by dmabuf stats.
	 * Override ->i_ino with a unique, dmabuffs-specific value.
	 */
	inode->i_ino = atomic64_inc_return(&dmabuf_inode);
	flags &= O_ACCMODE | O_NONBLOCK;
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
 */

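/*
 * A minimal exporter-side sketch of step 1 above. This is an illustration
 * under assumptions, not code from any real driver: my_buffer, my_buf_ops
 * and the my_export_buffer() helper are hypothetical, and my_buf_ops is
 * presumed to implement at least map_dma_buf, unmap_dma_buf and release:
 *
 * .. code-block:: c
 *
 *     static int my_export_buffer(struct my_buffer *buf, int flags)
 *     {
 *             DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *             struct dma_buf *dmabuf;
 *             int fd;
 *
 *             exp_info.ops = &my_buf_ops;	// must provide (un)map_dma_buf + release
 *             exp_info.size = buf->size;	// backing storage size in bytes
 *             exp_info.flags = O_CLOEXEC | O_RDWR;
 *             exp_info.priv = buf;		// handed back in every dma_buf_ops call
 *
 *             dmabuf = dma_buf_export(&exp_info);
 *             if (IS_ERR(dmabuf))
 *                     return PTR_ERR(dmabuf);
 *
 *             fd = dma_buf_fd(dmabuf, flags);
 *             if (fd < 0)
 *                     dma_buf_put(dmabuf);	// drops the file reference again
 *             return fd;
 *     }
 */
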
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator-specific data and ops to the buffer.
 * Additionally, provides a name string for the exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On missing
 * ops, or on failure to allocate the struct dma_buf, returns a negative
 * error wrapped in a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (WARN_ON(!exp_info->priv || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	file = dma_buf_getfile(exp_info->size, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_module;
	}

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;
	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_file;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
	INIT_LIST_HEAD(&dmabuf->attachments);

	if (!resv) {
		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(dmabuf->resv);
	} else {
		dmabuf->resv = resv;
	}

	ret = dma_buf_stats_setup(dmabuf, file);
	if (ret)
		goto err_dmabuf;

	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;
	dmabuf->file = file;

	__dma_buf_debugfs_list_add(dmabuf);

	return dmabuf;

err_dmabuf:
	if (!resv)
		dma_resv_fini(dmabuf->resv);
	kfree(dmabuf);
err_file:
	fput(file);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");

/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:      [in]    flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * the file's refcounting done by fget to increase the refcount.
 * Returns ERR_PTR otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF");

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");

static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/* To catch abuse of the underlying struct page by importers, mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
	 * before passing the sgt back to the exporter.
	 */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif

}

static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
				      enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	signed long ret;

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (IS_ERR_OR_NULL(sg_table))
		return sg_table;

	if (!dma_buf_attachment_is_dynamic(attach)) {
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0) {
			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
							   direction);
			return ERR_PTR(ret);
		}
	}

	mangle_sg_table(sg_table);
	return sg_table;
}

/**
 * DOC: locking convention
 *
 * In order to avoid deadlock situations between dma-buf exports and importers,
 * all dma-buf API users must follow the common dma-buf locking convention.
 *
 * Convention for importers
 *
 * 1. Importers must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 *     - dma_buf_pin()
 *     - dma_buf_unpin()
 *     - dma_buf_map_attachment()
 *     - dma_buf_unmap_attachment()
 *     - dma_buf_vmap()
 *     - dma_buf_vunmap()
 *
 * 2. Importers must not hold the dma-buf reservation lock when calling these
 *    functions:
 *
 *     - dma_buf_attach()
 *     - dma_buf_dynamic_attach()
 *     - dma_buf_detach()
 *     - dma_buf_export()
 *     - dma_buf_fd()
 *     - dma_buf_get()
 *     - dma_buf_put()
 *     - dma_buf_mmap()
 *     - dma_buf_begin_cpu_access()
 *     - dma_buf_end_cpu_access()
 *     - dma_buf_map_attachment_unlocked()
 *     - dma_buf_unmap_attachment_unlocked()
 *     - dma_buf_vmap_unlocked()
 *     - dma_buf_vunmap_unlocked()
 *
 * Convention for exporters
 *
 * 1. These &dma_buf_ops callbacks are invoked with an unlocked dma-buf
 *    reservation and the exporter can take the lock:
 *
 *     - &dma_buf_ops.attach()
 *     - &dma_buf_ops.detach()
 *     - &dma_buf_ops.release()
 *     - &dma_buf_ops.begin_cpu_access()
 *     - &dma_buf_ops.end_cpu_access()
 *     - &dma_buf_ops.mmap()
 *
 * 2. These &dma_buf_ops callbacks are invoked with a locked dma-buf
 *    reservation and the exporter can't take the lock:
 *
 *     - &dma_buf_ops.pin()
 *     - &dma_buf_ops.unpin()
 *     - &dma_buf_ops.map_dma_buf()
 *     - &dma_buf_ops.unmap_dma_buf()
 *     - &dma_buf_ops.vmap()
 *     - &dma_buf_ops.vunmap()
 *
 * 3. Exporters must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 *     - dma_buf_move_notify()
 */

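/*
 * A minimal importer-side sketch of the convention above; an illustration
 * under assumptions rather than code from a real driver (my_use_buffer() and
 * the DMA transfer it brackets are hypothetical). Attach and detach are done
 * without the reservation lock, while the map/unmap pair here relies on the
 * _unlocked variants taking the lock internally:
 *
 * .. code-block:: c
 *
 *     static int my_use_buffer(struct device *dev, int fd)
 *     {
 *             struct dma_buf_attachment *attach;
 *             struct dma_buf *dmabuf;
 *             struct sg_table *sgt;
 *             int ret = 0;
 *
 *             dmabuf = dma_buf_get(fd);		// unlocked
 *             if (IS_ERR(dmabuf))
 *                     return PTR_ERR(dmabuf);
 *
 *             attach = dma_buf_attach(dmabuf, dev);	// unlocked
 *             if (IS_ERR(attach)) {
 *                     ret = PTR_ERR(attach);
 *                     goto out_put;
 *             }
 *
 *             // Takes and releases the reservation lock internally.
 *             sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *             if (IS_ERR(sgt)) {
 *                     ret = PTR_ERR(sgt);
 *                     goto out_detach;
 *             }
 *
 *             // ... program the device with the addresses in sgt ...
 *
 *             dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *     out_detach:
 *             dma_buf_detach(dmabuf, attach);		// unlocked
 *     out_put:
 *             dma_buf_put(dmabuf);			// unlocked
 *             return ret;
 *     }
 */
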
/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		dma_resv_lock(attach->dmabuf->resv, NULL);
		if (dma_buf_is_dynamic(attach->dmabuf)) {
			ret = dmabuf->ops->pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dmabuf->ops->unpin(attach);

err_unlock:
	dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");

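/*
 * A hedged sketch of a dynamic importer using the interface above; the
 * my_attach_data type and my_invalidate_mappings() helper are hypothetical.
 * A dynamic importer provides a &dma_buf_attach_ops with a mandatory
 * move_notify callback, which runs with the reservation lock held:
 *
 * .. code-block:: c
 *
 *     static void my_move_notify(struct dma_buf_attachment *attach)
 *     {
 *             struct my_attach_data *data = attach->importer_priv;
 *
 *             // The reservation lock is held here; tear down the cached
 *             // mappings and re-create them on next use.
 *             my_invalidate_mappings(data);
 *     }
 *
 *     static const struct dma_buf_attach_ops my_attach_ops = {
 *             .allow_peer2peer = true,
 *             .move_notify = my_move_notify,
 *     };
 *
 *     // attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, data);
 */
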
/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");

static void __unmap_dma_buf(struct dma_buf_attachment *attach,
			    struct sg_table *sg_table,
			    enum dma_data_direction direction)
{
	/* uses XOR, hence this unmangles */
	mangle_sg_table(sg_table);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
		return;

	dma_resv_lock(dmabuf->resv, NULL);

	if (attach->sgt) {

		__unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf))
			dmabuf->ops->unpin(attach);
	}
	list_del(&attach->node);

	dma_resv_unlock(dmabuf->resv);

	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF");

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns the sg_table containing the scatterlist of the buffer, or ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 *
 * Important: Dynamic importers must wait for the exclusive fence of the struct
 * dma_resv attached to the DMA-BUF first.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = attach->dmabuf->ops->pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = __map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		attach->dmabuf->ops->unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

#ifdef CONFIG_DMA_API_DEBUG
	if (!IS_ERR(sg_table)) {
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
			}
		}
	}
#endif /* CONFIG_DMA_API_DEBUG */
	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");

/**
 * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Unlocked variant of dma_buf_map_attachment().
 */
struct sg_table *
dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
				enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_lock(attach->dmabuf->resv, NULL);
	sg_table = dma_buf_map_attachment(attach, direction);
	dma_resv_unlock(attach->dmabuf->resv);

	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:  [in]    direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt == sg_table)
		return;

	__unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");

/**
 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the
 * buffer; might deallocate the scatterlist associated. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * Unlocked variant of dma_buf_unmap_attachment().
 */
void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
				       struct sg_table *sg_table,
				       enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	dma_buf_unmap_attachment(attach, sg_table, direction);
	dma_resv_unlock(attach->dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces:
 *
 *   .. code-block:: c
 *
 *     int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
 *     void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following:
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *    For correctness and optimal performance, it is always required to use
 *    SYNC_START and SYNC_END before and after, respectively, when accessing the
 *    mapped address. Userspace cannot rely on coherent access, even when there
 *    are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface:
 *
 *   .. code-block:: c
 *
 *     int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 *   equally achieve that for a dma-buf object.
 */

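/*
 * A hedged userspace sketch of the mmap + DMA_BUF_IOCTL_SYNC bracketing
 * described above; illustration only, with error handling trimmed and the
 * buffer size assumed to be known by the caller:
 *
 * .. code-block:: c
 *
 *     #include <string.h>
 *     #include <sys/ioctl.h>
 *     #include <sys/mman.h>
 *     #include <linux/dma-buf.h>
 *
 *     static int cpu_fill(int dmabuf_fd, size_t size, const void *src)
 *     {
 *             struct dma_buf_sync sync = { 0 };
 *             void *ptr;
 *
 *             ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                        dmabuf_fd, 0);
 *             if (ptr == MAP_FAILED)
 *                     return -1;
 *
 *             // 1. SYNC_START brackets the CPU write access.
 *             sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *             ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// restart on -EINTR/-EAGAIN
 *
 *             memcpy(ptr, src, size);	// 2. the actual CPU access
 *
 *             // 3. SYNC_END flushes caches before a device consumes the data.
 *             sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *             ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *             return munmap(ptr, size);
 *     }
 */
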
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				    true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed for the specified access
 * direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
 * synchronization this function will only ensure cache coherency, callers must
 * ensure synchronization with such DMA transactions on their own.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF");

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF");

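/*
 * A minimal kernel-side sketch of the begin/end bracketing; an illustration
 * under assumptions (the my_cpu_clear() helper is hypothetical) showing a
 * CPU write to a buffer through a temporary vmap:
 *
 * .. code-block:: c
 *
 *     static int my_cpu_clear(struct dma_buf *dmabuf)
 *     {
 *             struct iosys_map map;
 *             int ret;
 *
 *             ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
 *             if (ret)
 *                     return ret;
 *
 *             // Takes the reservation lock around dma_buf_vmap() internally.
 *             ret = dma_buf_vmap_unlocked(dmabuf, &map);
 *             if (!ret) {
 *                     iosys_map_memset(&map, 0, 0, dmabuf->size);
 *                     dma_buf_vunmap_unlocked(dmabuf, &map);
 *             }
 *
 *             // End the bracket even if the vmap failed.
 *             dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
 *             return ret;
 *     }
 */
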

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function to
 * set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	vma_set_file(vma, dmabuf->file);
	vma->vm_pgoff = pgoff;

	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF");

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is mapping objects linearly into kernel address space for frequently
 * used objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct iosys_map ptr;
	int ret;

	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	dma_resv_assert_held(dmabuf->resv);

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		return 0;
	}

	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		return ret;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF");

/**
 * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * Unlocked version of dma_buf_vmap()
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
{
	int ret;

	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	dma_resv_lock(dmabuf->resv, NULL);
	ret = dma_buf_vmap(dmabuf, map);
	dma_resv_unlock(dmabuf->resv);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF");

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	dma_resv_assert_held(dmabuf->resv);

	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));

	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		iosys_map_clear(&dmabuf->vmap_ptr);
	}
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF");

/**
 * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	dma_resv_lock(dmabuf->resv, NULL);
	dma_buf_vunmap(dmabuf, map);
	dma_resv_unlock(dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF");

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;
	int ret;

	ret = mutex_lock_interruptible(&debugfs_list_mutex);
	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &debugfs_list, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		spin_lock(&buf_obj->name_lock);
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name,
				file_inode(buf_obj->file)->i_ino,
				buf_obj->name ?: "<none>");
		spin_unlock(&buf_obj->name_lock);

		dma_resv_describe(buf_obj->resv, s);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
				attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&debugfs_list_mutex);
	return 0;

error_unlock:
	mutex_unlock(&debugfs_list_mutex);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	int ret;

	ret = dma_buf_init_sysfs_statistics();
	if (ret)
		return ret;

	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
	dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);