xref: /linux/drivers/media/common/videobuf2/videobuf2-dma-sg.c (revision 7482c19173b7eb044d476b3444d7ee55bc669d03)
/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-resv.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
};

static void vb2_dma_sg_put(void *buf_priv);

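/*
 * Fill buf->pages with enough pages to cover buf->size. Allocation starts at
 * the highest page order that still fits in the remaining size and falls back
 * to smaller orders on failure, so the page run is as physically contiguous
 * ("compacted") as memory pressure allows and the scatterlist built from it
 * stays short. On failure, every page allocated so far is freed and -ENOMEM
 * is returned.
 */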
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	unsigned long size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
			      unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev) || WARN_ON(!size))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	/*
	 * NOTE: dma-sg allocates memory using the page allocator directly, so
	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
	 * attributes passed from the upper layer.
	 */
	buf->pages = kvcalloc(buf->num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;
	buf->vb = vb;

	refcount_set(&buf->refcount, 1);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (refcount_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kvfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_finish)
		return;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
				    unsigned long vaddr, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	buf->vb = vb;
	vec = vb2_create_framevec(vaddr, size,
				  buf->dma_dir == DMA_FROM_DEVICE ||
				  buf->dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->num_pages);
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

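/*
 * Return a kernel virtual address for the buffer. The mapping is created
 * lazily on first use: via dma_buf_vmap_unlocked() for DMABUF buffers, via
 * vm_map_ram() for MMAP/USERPTR buffers. It is torn down again when the
 * buffer is unmapped or freed.
 */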
static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct iosys_map map;
	int ret;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach) {
			ret = dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map);
			buf->vaddr = ret ? NULL : map.vaddr;
		} else {
			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
		}
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int err;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	err = vm_map_pages(vma, buf->pages, buf->num_pages);
	if (err) {
		printk(KERN_ERR "Remapping memory, error: %d\n", err);
		return err;
	}

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->dma_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
				      struct iosys_map *map)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	iosys_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	dma_resv_assert_held(dbuf->resv);

	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
					     void *buf_priv,
					     unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				      struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;
	buf->vb = vb;

	return buf;
}

static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.prepare	= vb2_dma_sg_prepare,
	.finish		= vb2_dma_sg_finish,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
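
/*
 * Usage sketch (not part of this file, shown only for illustration): a driver
 * selects this allocator by pointing its vb2_queue at these memops before
 * calling vb2_queue_init(). The surrounding driver code (q, pdev) is assumed.
 *
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	q->dev = &pdev->dev;		// device that will perform the DMA
 *	ret = vb2_queue_init(q);
 */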

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);