xref: /linux/drivers/media/common/videobuf2/videobuf2-dma-sg.c (revision 06d07429858317ded2db7986113a9e0129cd599b)
1 /*
2  * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
3  *
4  * Copyright (C) 2010 Samsung Electronics
5  *
6  * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation.
11  */
12 
13 #include <linux/module.h>
14 #include <linux/mm.h>
15 #include <linux/refcount.h>
16 #include <linux/scatterlist.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 
21 #include <media/videobuf2-v4l2.h>
22 #include <media/videobuf2-memops.h>
23 #include <media/videobuf2-dma-sg.h>
24 
25 static int debug;
26 module_param(debug, int, 0644);
27 
28 #define dprintk(level, fmt, arg...)					\
29 	do {								\
30 		if (debug >= level)					\
31 			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
32 	} while (0)
33 
34 struct vb2_dma_sg_buf {
35 	struct device			*dev;
36 	void				*vaddr;
37 	struct page			**pages;
38 	struct frame_vector		*vec;
39 	int				offset;
40 	enum dma_data_direction		dma_dir;
41 	struct sg_table			sg_table;
42 	/*
43 	 * This will point to sg_table when used with the MMAP or USERPTR
44 	 * memory model, and to the dma_buf sglist when used with the
45 	 * DMABUF memory model.
46 	 */
47 	struct sg_table			*dma_sgt;
48 	size_t				size;
49 	unsigned int			num_pages;
50 	refcount_t			refcount;
51 	struct vb2_vmarea_handler	handler;
52 
53 	struct dma_buf_attachment	*db_attach;
54 
55 	struct vb2_buffer		*vb;
56 };
57 
58 static void vb2_dma_sg_put(void *buf_priv);
59 
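/*
 * Allocate the pages that back the buffer. Try the highest page order that
 * still fits the remaining size and fall back to smaller orders when
 * allocation fails, so the resulting scatterlist stays as compact as
 * possible.
 */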
60 static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
61 		gfp_t gfp_flags)
62 {
63 	unsigned int last_page = 0;
64 	unsigned long size = buf->size;
65 
66 	while (size > 0) {
67 		struct page *pages;
68 		int order;
69 		int i;
70 
71 		order = get_order(size);
72 		/* Don't over-allocate */
73 		if ((PAGE_SIZE << order) > size)
74 			order--;
75 
76 		pages = NULL;
77 		while (!pages) {
78 			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
79 					__GFP_NOWARN | gfp_flags, order);
80 			if (pages)
81 				break;
82 
83 			if (order == 0) {
84 				while (last_page--)
85 					__free_page(buf->pages[last_page]);
86 				return -ENOMEM;
87 			}
88 			order--;
89 		}
90 
91 		split_page(pages, order);
92 		for (i = 0; i < (1 << order); i++)
93 			buf->pages[last_page++] = &pages[i];
94 
95 		size -= PAGE_SIZE << order;
96 	}
97 
98 	return 0;
99 }
100 
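/*
 * MMAP memory op: allocate the pages, build an sg_table covering them and
 * map it for DMA. The CPU cache sync is deliberately skipped here; it is
 * done later by the prepare()/finish() memops.
 */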
101 static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
102 			      unsigned long size)
103 {
104 	struct vb2_dma_sg_buf *buf;
105 	struct sg_table *sgt;
106 	int ret;
107 	int num_pages;
108 
109 	if (WARN_ON(!dev) || WARN_ON(!size))
110 		return ERR_PTR(-EINVAL);
111 
112 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
113 	if (!buf)
114 		return ERR_PTR(-ENOMEM);
115 
116 	buf->vaddr = NULL;
117 	buf->dma_dir = vb->vb2_queue->dma_dir;
118 	buf->offset = 0;
119 	buf->size = size;
120 	/* size is already page aligned */
121 	buf->num_pages = size >> PAGE_SHIFT;
122 	buf->dma_sgt = &buf->sg_table;
123 
124 	/*
125 	 * NOTE: dma-sg allocates memory using the page allocator directly, so
126 	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
127 	 * attributes passed from the upper layer.
128 	 */
129 	buf->pages = kvcalloc(buf->num_pages, sizeof(struct page *), GFP_KERNEL);
130 	if (!buf->pages)
131 		goto fail_pages_array_alloc;
132 
133 	ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
134 	if (ret)
135 		goto fail_pages_alloc;
136 
137 	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
138 			buf->num_pages, 0, size, GFP_KERNEL);
139 	if (ret)
140 		goto fail_table_alloc;
141 
142 	/* Prevent the device from being released while the buffer is used */
143 	buf->dev = get_device(dev);
144 
145 	sgt = &buf->sg_table;
146 	/*
147 	 * No need to sync to the device, this will happen later when the
148 	 * prepare() memop is called.
149 	 */
150 	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
151 			    DMA_ATTR_SKIP_CPU_SYNC))
152 		goto fail_map;
153 
154 	buf->handler.refcount = &buf->refcount;
155 	buf->handler.put = vb2_dma_sg_put;
156 	buf->handler.arg = buf;
157 	buf->vb = vb;
158 
159 	refcount_set(&buf->refcount, 1);
160 
161 	dprintk(1, "%s: Allocated buffer of %d pages\n",
162 		__func__, buf->num_pages);
163 	return buf;
164 
165 fail_map:
166 	put_device(buf->dev);
167 	sg_free_table(buf->dma_sgt);
168 fail_table_alloc:
169 	num_pages = buf->num_pages;
170 	while (num_pages--)
171 		__free_page(buf->pages[num_pages]);
172 fail_pages_alloc:
173 	kvfree(buf->pages);
174 fail_pages_array_alloc:
175 	kfree(buf);
176 	return ERR_PTR(-ENOMEM);
177 }
178 
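/*
 * Drop a reference to a MMAP buffer. When the last reference goes away,
 * unmap the scatterlist, remove the kernel mapping (if any) and free the
 * pages and the buffer itself.
 */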
179 static void vb2_dma_sg_put(void *buf_priv)
180 {
181 	struct vb2_dma_sg_buf *buf = buf_priv;
182 	struct sg_table *sgt = &buf->sg_table;
183 	int i = buf->num_pages;
184 
185 	if (refcount_dec_and_test(&buf->refcount)) {
186 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
187 			buf->num_pages);
188 		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
189 				  DMA_ATTR_SKIP_CPU_SYNC);
190 		if (buf->vaddr)
191 			vm_unmap_ram(buf->vaddr, buf->num_pages);
192 		sg_free_table(buf->dma_sgt);
193 		while (--i >= 0)
194 			__free_page(buf->pages[i]);
195 		kvfree(buf->pages);
196 		put_device(buf->dev);
197 		kfree(buf);
198 	}
199 }
200 
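/* Sync the buffer for device access before the driver starts using it. */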
201 static void vb2_dma_sg_prepare(void *buf_priv)
202 {
203 	struct vb2_dma_sg_buf *buf = buf_priv;
204 	struct sg_table *sgt = buf->dma_sgt;
205 
206 	if (buf->vb->skip_cache_sync_on_prepare)
207 		return;
208 
209 	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
210 }
211 
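/* Sync the buffer back for CPU access once the driver is done with it. */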
212 static void vb2_dma_sg_finish(void *buf_priv)
213 {
214 	struct vb2_dma_sg_buf *buf = buf_priv;
215 	struct sg_table *sgt = buf->dma_sgt;
216 
217 	if (buf->vb->skip_cache_sync_on_finish)
218 		return;
219 
220 	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
221 }
222 
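/*
 * USERPTR memory op: pin the userspace pages, build an sg_table covering
 * them and map it for DMA.
 */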
223 static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
224 				    unsigned long vaddr, unsigned long size)
225 {
226 	struct vb2_dma_sg_buf *buf;
227 	struct sg_table *sgt;
228 	struct frame_vector *vec;
229 
230 	if (WARN_ON(!dev))
231 		return ERR_PTR(-EINVAL);
232 
233 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
234 	if (!buf)
235 		return ERR_PTR(-ENOMEM);
236 
237 	buf->vaddr = NULL;
238 	buf->dev = dev;
239 	buf->dma_dir = vb->vb2_queue->dma_dir;
240 	buf->offset = vaddr & ~PAGE_MASK;
241 	buf->size = size;
242 	buf->dma_sgt = &buf->sg_table;
243 	buf->vb = vb;
244 	vec = vb2_create_framevec(vaddr, size,
245 				  buf->dma_dir == DMA_FROM_DEVICE ||
246 				  buf->dma_dir == DMA_BIDIRECTIONAL);
247 	if (IS_ERR(vec))
248 		goto userptr_fail_pfnvec;
249 	buf->vec = vec;
250 
251 	buf->pages = frame_vector_pages(vec);
252 	if (IS_ERR(buf->pages))
253 		goto userptr_fail_sgtable;
254 	buf->num_pages = frame_vector_count(vec);
255 
256 	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
257 			buf->num_pages, buf->offset, size, 0))
258 		goto userptr_fail_sgtable;
259 
260 	sgt = &buf->sg_table;
261 	/*
262 	 * No need to sync to the device, this will happen later when the
263 	 * prepare() memop is called.
264 	 */
265 	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
266 			    DMA_ATTR_SKIP_CPU_SYNC))
267 		goto userptr_fail_map;
268 
269 	return buf;
270 
271 userptr_fail_map:
272 	sg_free_table(&buf->sg_table);
273 userptr_fail_sgtable:
274 	vb2_destroy_framevec(vec);
275 userptr_fail_pfnvec:
276 	kfree(buf);
277 	return ERR_PTR(-ENOMEM);
278 }
279 
280 /*
281  * @put_userptr: inform the allocator that a USERPTR buffer will no longer
282  *		 be used
283  */
284 static void vb2_dma_sg_put_userptr(void *buf_priv)
285 {
286 	struct vb2_dma_sg_buf *buf = buf_priv;
287 	struct sg_table *sgt = &buf->sg_table;
288 	int i = buf->num_pages;
289 
290 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
291 	       __func__, buf->num_pages);
292 	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
293 	if (buf->vaddr)
294 		vm_unmap_ram(buf->vaddr, buf->num_pages);
295 	sg_free_table(buf->dma_sgt);
296 	if (buf->dma_dir == DMA_FROM_DEVICE ||
297 	    buf->dma_dir == DMA_BIDIRECTIONAL)
298 		while (--i >= 0)
299 			set_page_dirty_lock(buf->pages[i]);
300 	vb2_destroy_framevec(buf->vec);
301 	kfree(buf);
302 }
303 
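/*
 * Return a kernel virtual address for the buffer, creating the mapping on
 * first use: dma_buf_vmap_unlocked() for DMABUF buffers, vm_map_ram()
 * otherwise.
 */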
304 static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
305 {
306 	struct vb2_dma_sg_buf *buf = buf_priv;
307 	struct iosys_map map;
308 	int ret;
309 
310 	BUG_ON(!buf);
311 
312 	if (!buf->vaddr) {
313 		if (buf->db_attach) {
314 			ret = dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map);
315 			buf->vaddr = ret ? NULL : map.vaddr;
316 		} else {
317 			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
318 		}
319 	}
320 
321 	/* add offset in case userptr is not page-aligned */
322 	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
323 }
324 
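/*
 * Report how many users (vb2 itself, mmap mappings, dmabuf exports) still
 * hold a reference to the buffer.
 */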
325 static unsigned int vb2_dma_sg_num_users(void *buf_priv)
326 {
327 	struct vb2_dma_sg_buf *buf = buf_priv;
328 
329 	return refcount_read(&buf->refcount);
330 }
331 
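/* Map the buffer's pages into a userspace VMA and hook up refcounting. */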
332 static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
333 {
334 	struct vb2_dma_sg_buf *buf = buf_priv;
335 	int err;
336 
337 	if (!buf) {
338 		printk(KERN_ERR "No memory to map\n");
339 		return -EINVAL;
340 	}
341 
342 	err = vm_map_pages(vma, buf->pages, buf->num_pages);
343 	if (err) {
344 		printk(KERN_ERR "Remapping memory failed, error: %d\n", err);
345 		return err;
346 	}
347 
348 	/*
349 	 * Use common vm_area operations to track buffer refcount.
350 	 */
351 	vma->vm_private_data	= &buf->handler;
352 	vma->vm_ops		= &vb2_common_vm_ops;
353 
354 	vma->vm_ops->open(vma);
355 
356 	return 0;
357 }
358 
359 /*********************************************/
360 /*         DMABUF ops for exporters          */
361 /*********************************************/
362 
363 struct vb2_dma_sg_attachment {
364 	struct sg_table sgt;
365 	enum dma_data_direction dma_dir;
366 };
367 
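/*
 * Exporter attach: give the new attachment its own copy of the scatterlist
 * so that each importer can be mapped independently.
 */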
368 static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
369 	struct dma_buf_attachment *dbuf_attach)
370 {
371 	struct vb2_dma_sg_attachment *attach;
372 	unsigned int i;
373 	struct scatterlist *rd, *wr;
374 	struct sg_table *sgt;
375 	struct vb2_dma_sg_buf *buf = dbuf->priv;
376 	int ret;
377 
378 	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
379 	if (!attach)
380 		return -ENOMEM;
381 
382 	sgt = &attach->sgt;
383 	/* Copy the buf->dma_sgt scatter list to the attachment, as we can't
384 	 * map the same scatter list to multiple attachments at the same time.
385 	 */
386 	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
387 	if (ret) {
388 		kfree(attach);
389 		return -ENOMEM;
390 	}
391 
392 	rd = buf->dma_sgt->sgl;
393 	wr = sgt->sgl;
394 	for (i = 0; i < sgt->orig_nents; ++i) {
395 		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
396 		rd = sg_next(rd);
397 		wr = sg_next(wr);
398 	}
399 
400 	attach->dma_dir = DMA_NONE;
401 	dbuf_attach->priv = attach;
402 
403 	return 0;
404 }
405 
406 static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
407 	struct dma_buf_attachment *db_attach)
408 {
409 	struct vb2_dma_sg_attachment *attach = db_attach->priv;
410 	struct sg_table *sgt;
411 
412 	if (!attach)
413 		return;
414 
415 	sgt = &attach->sgt;
416 
417 	/* release the scatterlist cache */
418 	if (attach->dma_dir != DMA_NONE)
419 		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
420 	sg_free_table(sgt);
421 	kfree(attach);
422 	db_attach->priv = NULL;
423 }
424 
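/*
 * Exporter map: map the attachment's scatterlist for the importing device,
 * reusing the existing mapping if the direction has not changed.
 */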
425 static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
426 	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
427 {
428 	struct vb2_dma_sg_attachment *attach = db_attach->priv;
429 	struct sg_table *sgt;
430 
431 	sgt = &attach->sgt;
432 	/* return previously mapped sg table */
433 	if (attach->dma_dir == dma_dir)
434 		return sgt;
435 
436 	/* release any previous cache */
437 	if (attach->dma_dir != DMA_NONE) {
438 		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
439 		attach->dma_dir = DMA_NONE;
440 	}
441 
442 	/* mapping to the client with new direction */
443 	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
444 		pr_err("failed to map scatterlist\n");
445 		return ERR_PTR(-EIO);
446 	}
447 
448 	attach->dma_dir = dma_dir;
449 
450 	return sgt;
451 }
452 
453 static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
454 	struct sg_table *sgt, enum dma_data_direction dma_dir)
455 {
456 	/* nothing to be done here */
457 }
458 
459 static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
460 {
461 	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
462 	vb2_dma_sg_put(dbuf->priv);
463 }
464 
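/* Exporter begin_cpu_access: sync the pages so the CPU sees up-to-date data. */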
465 static int
466 vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
467 				       enum dma_data_direction direction)
468 {
469 	struct vb2_dma_sg_buf *buf = dbuf->priv;
470 	struct sg_table *sgt = buf->dma_sgt;
471 
472 	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
473 	return 0;
474 }
475 
476 static int
477 vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
478 				     enum dma_data_direction direction)
479 {
480 	struct vb2_dma_sg_buf *buf = dbuf->priv;
481 	struct sg_table *sgt = buf->dma_sgt;
482 
483 	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
484 	return 0;
485 }
486 
487 static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
488 				      struct iosys_map *map)
489 {
490 	struct vb2_dma_sg_buf *buf;
491 	void *vaddr;
492 
493 	buf = dbuf->priv;
494 	vaddr = vb2_dma_sg_vaddr(buf->vb, buf);
495 	if (!vaddr)
496 		return -EINVAL;
497 
498 	iosys_map_set_vaddr(map, vaddr);
499 
500 	return 0;
501 }
502 
503 static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
504 	struct vm_area_struct *vma)
505 {
506 	return vb2_dma_sg_mmap(dbuf->priv, vma);
507 }
508 
509 static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
510 	.attach = vb2_dma_sg_dmabuf_ops_attach,
511 	.detach = vb2_dma_sg_dmabuf_ops_detach,
512 	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
513 	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
514 	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
515 	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
516 	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
517 	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
518 	.release = vb2_dma_sg_dmabuf_ops_release,
519 };
520 
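/*
 * Export the buffer as a dma-buf. The exported dma-buf takes its own
 * reference on the vb2 buffer, dropped again in the release op.
 */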
521 static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
522 					     void *buf_priv,
523 					     unsigned long flags)
524 {
525 	struct vb2_dma_sg_buf *buf = buf_priv;
526 	struct dma_buf *dbuf;
527 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
528 
529 	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
530 	exp_info.size = buf->size;
531 	exp_info.flags = flags;
532 	exp_info.priv = buf;
533 
534 	if (WARN_ON(!buf->dma_sgt))
535 		return NULL;
536 
537 	dbuf = dma_buf_export(&exp_info);
538 	if (IS_ERR(dbuf))
539 		return NULL;
540 
541 	/* dmabuf keeps reference to vb2 buffer */
542 	refcount_inc(&buf->refcount);
543 
544 	return dbuf;
545 }
546 
547 /*********************************************/
548 /*       callbacks for DMABUF buffers        */
549 /*********************************************/
550 
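/* Importer side: pin the attached dma-buf and grab its DMA-mapped scatterlist. */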
551 static int vb2_dma_sg_map_dmabuf(void *mem_priv)
552 {
553 	struct vb2_dma_sg_buf *buf = mem_priv;
554 	struct sg_table *sgt;
555 
556 	if (WARN_ON(!buf->db_attach)) {
557 		pr_err("trying to pin a non-attached buffer\n");
558 		return -EINVAL;
559 	}
560 
561 	if (WARN_ON(buf->dma_sgt)) {
562 		pr_err("dmabuf buffer is already pinned\n");
563 		return 0;
564 	}
565 
566 	/* get the associated scatterlist for this buffer */
567 	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
568 	if (IS_ERR(sgt)) {
569 		pr_err("Error getting dmabuf scatterlist\n");
570 		return -EINVAL;
571 	}
572 
573 	buf->dma_sgt = sgt;
574 	buf->vaddr = NULL;
575 
576 	return 0;
577 }
578 
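/* Importer side: drop the kernel mapping (if any) and unmap the attachment. */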
579 static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
580 {
581 	struct vb2_dma_sg_buf *buf = mem_priv;
582 	struct sg_table *sgt = buf->dma_sgt;
583 	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
584 
585 	if (WARN_ON(!buf->db_attach)) {
586 		pr_err("trying to unpin a non-attached buffer\n");
587 		return;
588 	}
589 
590 	if (WARN_ON(!sgt)) {
591 		pr_err("dmabuf buffer is already unpinned\n");
592 		return;
593 	}
594 
595 	if (buf->vaddr) {
596 		dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
597 		buf->vaddr = NULL;
598 	}
599 	dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);
600 
601 	buf->dma_sgt = NULL;
602 }
603 
604 static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
605 {
606 	struct vb2_dma_sg_buf *buf = mem_priv;
607 
608 	/* if vb2 works correctly you should never detach a mapped buffer */
609 	if (WARN_ON(buf->dma_sgt))
610 		vb2_dma_sg_unmap_dmabuf(buf);
611 
612 	/* detach this attachment */
613 	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
614 	kfree(buf);
615 }
616 
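/* Importer side: attach the dma-buf to the device that will perform the DMA. */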
617 static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
618 				      struct dma_buf *dbuf, unsigned long size)
619 {
620 	struct vb2_dma_sg_buf *buf;
621 	struct dma_buf_attachment *dba;
622 
623 	if (WARN_ON(!dev))
624 		return ERR_PTR(-EINVAL);
625 
626 	if (dbuf->size < size)
627 		return ERR_PTR(-EFAULT);
628 
629 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
630 	if (!buf)
631 		return ERR_PTR(-ENOMEM);
632 
633 	buf->dev = dev;
634 	/* create attachment for the dmabuf with the user device */
635 	dba = dma_buf_attach(dbuf, buf->dev);
636 	if (IS_ERR(dba)) {
637 		pr_err("failed to attach dmabuf\n");
638 		kfree(buf);
639 		return dba;
640 	}
641 
642 	buf->dma_dir = vb->vb2_queue->dma_dir;
643 	buf->size = size;
644 	buf->db_attach = dba;
645 	buf->vb = vb;
646 
647 	return buf;
648 }
649 
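/* The dma-sg "cookie" handed back to drivers is the buffer's sg_table. */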
650 static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
651 {
652 	struct vb2_dma_sg_buf *buf = buf_priv;
653 
654 	return buf->dma_sgt;
655 }
656 
657 const struct vb2_mem_ops vb2_dma_sg_memops = {
658 	.alloc		= vb2_dma_sg_alloc,
659 	.put		= vb2_dma_sg_put,
660 	.get_userptr	= vb2_dma_sg_get_userptr,
661 	.put_userptr	= vb2_dma_sg_put_userptr,
662 	.prepare	= vb2_dma_sg_prepare,
663 	.finish		= vb2_dma_sg_finish,
664 	.vaddr		= vb2_dma_sg_vaddr,
665 	.mmap		= vb2_dma_sg_mmap,
666 	.num_users	= vb2_dma_sg_num_users,
667 	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
668 	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
669 	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
670 	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
671 	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
672 	.cookie		= vb2_dma_sg_cookie,
673 };
674 EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
675 
676 MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
677 MODULE_AUTHOR("Andrzej Pietrasiewicz");
678 MODULE_LICENSE("GPL");
679 MODULE_IMPORT_NS(DMA_BUF);
680