// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	dma_resv_lock(dmabuf->resv, NULL);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	dma_resv_unlock(dmabuf->resv);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operations on the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
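 *
 * A minimal userspace sketch of such a query (assuming the dma-buf file
 * descriptor is in ``fd``; error handling omitted)::
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *
 *	poll(&pfd, 1, -1);
 *
 * poll() returns with POLLOUT set once all fences attached to the buffer
 * have signaled.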
 */

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}

/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoretically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purposes between different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, returns -EBUSY.
 *
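 * A userspace sketch of setting the name through the ioctl backed by this
 * helper (assuming the dma-buf file descriptor is in ``fd``)::
 *
 *	ioctl(fd, DMA_BUF_SET_NAME, "my-buffer");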
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	dma_resv_lock(dmabuf->resv, NULL);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	kfree(dmabuf->name);
	dmabuf->name = name;

out_unlock:
	dma_resv_unlock(dmabuf->resv);
	return ret;
}

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	dma_resv_lock(dmabuf->resv, NULL);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	dma_resv_unlock(dmabuf->resv);
}

static const struct file_operations dma_buf_fops = {
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: first the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
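 *
 * A minimal importer-side sketch of steps 2-4 (error handling omitted;
 * ``dev`` is the importing &struct device)::
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	... device DMA to/from the buffer ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);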
 */

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator-specific data and ops to the buffer.
 * Additionally, provides a name string for the exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. If any of the
 * mandatory ops are missing, or allocating the struct dma_buf fails, it
 * returns a negative error wrapped into a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
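 *
 * A minimal exporter sketch (``my_buf`` and ``my_dma_buf_ops`` are
 * hypothetical)::
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *
 *	exp_info.ops = &my_dma_buf_ops;
 *	exp_info.size = my_buf->size;
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_buf;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);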
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:      [in]    flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_resv_lock(attach->dmabuf->resv, NULL);
			ret = dma_buf_pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_buf_unpin(attach);

err_unlock:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt) {
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_lock(attach->dmabuf->resv, NULL);

		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_buf_unpin(attach);
			dma_resv_unlock(attach->dmabuf->resv);
		}
	}

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_pin - Lock down the DMA-buf
 *
 * @attach:	[in]	attachment which should be pinned
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_pin);

/**
 * dma_buf_unpin - Remove lock from DMA-buf
 *
 * @attach:	[in]	attachment which should be unpinned
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unpin);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		dma_resv_assert_held(attach->dmabuf->resv);
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = dma_buf_pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases use count of the buffer; might
 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:  [in]    direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt == sg_table)
		return;

	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_move_notify);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces::
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
 *   that the dma-buf layer keeps a reference count for all vmap access and
 *   calls down into the exporter's vmap function only when no vmapping exists,
 *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
 *   provided by taking the dma_buf->lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like following:
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *    For correctness and optimal performance, it is always required to use
 *    SYNC_START and SYNC_END before and after, respectively, when accessing the
 *    mapped address. Userspace cannot rely on coherent access, even when there
 *    are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *		       unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
 *   equally achieve that for a dma-buf object.
 */

static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout_rcu(resv, write, true,
					MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed for the specified access
 * direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of the intended cpu access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
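 *
 * A minimal sketch of bracketed kernel cpu access::
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	... read from the buffer ...
 *
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);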
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of the cpu access that is being completed.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
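 *
 * A sketch of an importer forwarding its own mmap handler (the surrounding
 * driver code is hypothetical)::
 *
 *	static int my_drv_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct my_drv_object *obj = filp->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}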
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use is to map an object
 * linearly into kernel address space for frequently accessed objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
1194 void *dma_buf_vmap(struct dma_buf *dmabuf)
1195 {
1196 	void *ptr;
1197 
1198 	if (WARN_ON(!dmabuf))
1199 		return NULL;
1200 
1201 	if (!dmabuf->ops->vmap)
1202 		return NULL;
1203 
1204 	mutex_lock(&dmabuf->lock);
1205 	if (dmabuf->vmapping_counter) {
1206 		dmabuf->vmapping_counter++;
1207 		BUG_ON(!dmabuf->vmap_ptr);
1208 		ptr = dmabuf->vmap_ptr;
1209 		goto out_unlock;
1210 	}
1211 
1212 	BUG_ON(dmabuf->vmap_ptr);
1213 
1214 	ptr = dmabuf->ops->vmap(dmabuf);
1215 	if (WARN_ON_ONCE(IS_ERR(ptr)))
1216 		ptr = NULL;
1217 	if (!ptr)
1218 		goto out_unlock;
1219 
1220 	dmabuf->vmap_ptr = ptr;
1221 	dmabuf->vmapping_counter = 1;
1222 
1223 out_unlock:
1224 	mutex_unlock(&dmabuf->lock);
1225 	return ptr;
1226 }
1227 EXPORT_SYMBOL_GPL(dma_buf_vmap);
1228 
1229 /**
1230  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1231  * @dmabuf:	[in]	buffer to vunmap
1232  * @vaddr:	[in]	vmap to vunmap
1233  */
1234 void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
1235 {
1236 	if (WARN_ON(!dmabuf))
1237 		return;
1238 
1239 	BUG_ON(!dmabuf->vmap_ptr);
1240 	BUG_ON(dmabuf->vmapping_counter == 0);
1241 	BUG_ON(dmabuf->vmap_ptr != vaddr);
1242 
1243 	mutex_lock(&dmabuf->lock);
1244 	if (--dmabuf->vmapping_counter == 0) {
1245 		if (dmabuf->ops->vunmap)
1246 			dmabuf->ops->vunmap(dmabuf, vaddr);
1247 		dmabuf->vmap_ptr = NULL;
1248 	}
1249 	mutex_unlock(&dmabuf->lock);
1250 }
1251 EXPORT_SYMBOL_GPL(dma_buf_vunmap);

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct dma_resv *robj;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name,
				file_inode(buf_obj->file)->i_ino,
				buf_obj->name ?: "");

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
				attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;

error_unlock:
	mutex_unlock(&db_list.lock);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
}
__exitcall(dma_buf_deinit);