/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

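/*
 * Return the size of the contiguous DMA range at the start of the mapped
 * scatterlist; the walk stops at the first gap between entries.
 */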
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

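/*
 * The cookie is the buffer's DMA address; drivers normally retrieve it
 * through the vb2_dma_contig_plane_dma_addr() helper.
 */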
static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

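/*
 * For imported DMABUFs the kernel mapping is created lazily here via
 * dma_buf_vmap(); MMAP buffers already have a vaddr from allocation
 * (unless DMA_ATTR_NO_KERNEL_MAPPING was requested).
 */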
static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_attrs of size %lu failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No buffer to map\n");
		return -EINVAL;
	}

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
		buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/*
	 * Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.map = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

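/*
 * Build an sg_table describing the coherent allocation. The result is
 * cached in buf->sgt_base and copied once per DMABUF attachment.
 */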
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
		buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to the CPU here: the finish() memop will
		 * have been called before this, so the buffer is already
		 * synced.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL) {
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		}
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

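/*
 * Pin a userspace buffer and map it for DMA. Two paths: page-backed
 * memory is mapped through an sg table; if the pages can't be obtained
 * but the PFN range is physically contiguous (e.g. carved-out memory),
 * it is mapped directly with dma_map_resource().
 */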
static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check whether the memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

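/*
 * Import side: the vb2 core calls attach_dmabuf() once per buffer,
 * map_dmabuf() before the buffer is used by the device, and the matching
 * unmap_dmabuf()/detach_dmabuf() when it is released.
 */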
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu bytes\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
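
/*
 * A minimal queue-setup sketch for context (not part of this file;
 * "my_dev" and its fields are illustrative). The vb2 core dispatches
 * the memops above once mem_ops is set:
 *
 *	struct vb2_queue *q = &my_dev->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->dev = my_dev->dev;	(the device used for the DMA mapping)
 *	ret = vb2_queue_init(q);
 *
 * The bus address of a plane is then available to the driver via
 * vb2_dma_contig_plane_dma_addr(vb, plane), which goes through
 * vb2_dc_cookie() above.
 */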

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers known to operate on
 * platforms with an IOMMU and to provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
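
/*
 * A minimal usage sketch (hypothetical driver; the probe function and
 * device names are illustrative). Several media drivers pass
 * DMA_BIT_MASK(32) as a "large enough" segment size:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = vb2_dma_contig_set_max_seg_size(&pdev->dev,
 *						      DMA_BIT_MASK(32));
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */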

/**
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev:	device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size()). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
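
/*
 * The matching cleanup for the sketch above (again hypothetical):
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		...
 *		vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 *		return 0;
 *	}
 */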

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");