/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device *dev;
	void *vaddr;
	unsigned long size;
	void *cookie;
	dma_addr_t dma_addr;
	unsigned long attrs;
	enum dma_data_direction dma_dir;
	struct sg_table *dma_sgt;
	struct frame_vector *vec;

	/* MMAP related */
	struct vb2_vmarea_handler handler;
	refcount_t refcount;
	struct sg_table *sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment *db_attach;

	struct vb2_buffer *vb;
	bool non_coherent_mem;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

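/*
 * Worked example (illustrative only, derived from the logic below): for a
 * table whose DMA segments are mapped at [0x1000, +0x1000], [0x2000,
 * +0x1000] and [0x8000, +0x1000], this helper returns 0x2000: the first
 * two segments form one contiguous run, while the third does not start at
 * the expected 0x3000 address and ends the scan.
 */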
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sgtable_dma_sg(sgt, s, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected += sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

/*
 * This function may fail if:
 *
 * - dma_buf_vmap() fails
 *   E.g. due to lack of virtual mapping address space, or due to
 *   dmabuf->ops misconfiguration.
 *
 * - dma_vmap_noncontiguous() fails
 *   For instance, when requested buffer size is larger than totalram_pages().
 *   Relevant for buffers that use non-coherent memory.
 *
 * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set
 *   Relevant for buffers that use coherent memory.
 */
static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (buf->vaddr)
		return buf->vaddr;

	if (buf->db_attach) {
		struct iosys_map map;

		if (!dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map))
			buf->vaddr = map.vaddr;

		return buf->vaddr;
	}

	if (buf->non_coherent_mem)
		buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
						    buf->dma_sgt);
	return buf->vaddr;
}
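/*
 * Illustration (hypothetical driver setup, not code from this file): a
 * driver that never needs a kernel mapping for its coherent MMAP buffers
 * can opt out of it before vb2_queue_init(); vb2_dc_vaddr() then returns
 * NULL for those buffers:
 *
 *	q->dma_attrs = DMA_ATTR_NO_KERNEL_MAPPING;
 */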

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of DMABUF and user-enforced cache sync hint */
	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		flush_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of DMABUF and user-enforced cache sync hint */
	if (buf->vb->skip_cache_sync_on_finish)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		invalidate_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->non_coherent_mem) {
		if (buf->vaddr)
			dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
		dma_free_noncontiguous(buf->dev, buf->size,
				       buf->dma_sgt, buf->dma_dir);
	} else {
		if (buf->sgt_base) {
			sg_free_table(buf->sgt_base);
			kfree(buf->sgt_base);
		}
		dma_free_attrs(buf->dev, buf->size, buf->cookie,
			       buf->dma_addr, buf->attrs);
	}
	put_device(buf->dev);
	kfree(buf);
}

static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->cookie = dma_alloc_attrs(buf->dev,
				      buf->size,
				      &buf->dma_addr,
				      GFP_KERNEL | q->gfp_flags,
				      buf->attrs);
	if (!buf->cookie)
		return -ENOMEM;

	if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return 0;

	buf->vaddr = buf->cookie;
	return 0;
}

static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
					       buf->size,
					       buf->dma_dir,
					       GFP_KERNEL | q->gfp_flags,
					       buf->attrs);
	if (!buf->dma_sgt)
		return -ENOMEM;

	buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);

	/*
	 * For non-coherent buffers the kernel mapping is created on demand
	 * in vb2_dc_vaddr().
	 */
	return 0;
}

static void *vb2_dc_alloc(struct vb2_buffer *vb,
			  struct device *dev,
			  unsigned long size)
{
	struct vb2_dc_buf *buf;
	int ret;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = vb->vb2_queue->dma_attrs;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;
	buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;

	buf->size = size;
	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	if (buf->non_coherent_mem)
		ret = vb2_dc_alloc_non_coherent(buf);
	else
		ret = vb2_dc_alloc_coherent(buf);

	if (ret) {
		dev_err(dev, "dma alloc of size %lu failed\n", size);
		put_device(buf->dev);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	if (buf->non_coherent_mem)
		ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
					     buf->dma_sgt);
	else
		ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
				     buf->size, buf->attrs);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
				    struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
				     struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		/*
		 * Cache sync can be skipped here, as the vb2_dc memory is
		 * allocated from device coherent memory, which means the
		 * memory locations do not require any explicit cache
		 * maintenance prior or after being used by the device.
		 */
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		attach->dma_dir = DMA_NONE;
	}

	/*
	 * mapping to the client with new direction, no cache sync
	 * required see comment in vb2_dc_dmabuf_ops_detach()
	 */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
				    struct sg_table *sgt,
				    enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				   enum dma_data_direction direction)
{
	return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				 enum dma_data_direction direction)
{
	return 0;
}

static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map)
{
	struct vb2_dc_buf *buf;
	void *vaddr;

	buf = dbuf->priv;
	vaddr = vb2_dc_vaddr(buf->vb, buf);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				  struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	if (buf->non_coherent_mem)
		return buf->dma_sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
					 void *buf_priv,
					 unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
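/*
 * Usage sketch (illustrative, not code from this file): the vb2 core
 * invokes this memop on the VIDIOC_EXPBUF path and then hands userspace
 * a file descriptor for the returned buffer, conceptually:
 *
 *	dbuf = q->mem_ops->get_dmabuf(vb, vb->planes[plane].mem_priv, flags);
 *	fd = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
 */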

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL) {
			pages = frame_vector_pages(buf->vec);
			/* sgt should exist only if vector contains pages... */
			if (!WARN_ON_ONCE(IS_ERR(pages)))
				for (i = 0; i < frame_vector_count(buf->vec); i++)
					set_page_dirty_lock(pages[i]);
		}
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				unsigned long vaddr, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE ||
					       buf->dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
					offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
		       contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->non_coherent_mem = 1;

out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt,
						  buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				  struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->vb = vb;

	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc = vb2_dc_alloc,
	.put = vb2_dc_put,
	.get_dmabuf = vb2_dc_get_dmabuf,
	.cookie = vb2_dc_cookie,
	.vaddr = vb2_dc_vaddr,
	.mmap = vb2_dc_mmap,
	.get_userptr = vb2_dc_get_userptr,
	.put_userptr = vb2_dc_put_userptr,
	.prepare = vb2_dc_prepare,
	.finish = vb2_dc_finish,
	.map_dmabuf = vb2_dc_map_dmabuf,
	.unmap_dmabuf = vb2_dc_unmap_dmabuf,
	.attach_dmabuf = vb2_dc_attach_dmabuf,
	.detach_dmabuf = vb2_dc_detach_dmabuf,
	.num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
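/*
 * Usage sketch (hypothetical driver code, for illustration only): a driver
 * selects this allocator by pointing its vb2_queue at these memops before
 * calling vb2_queue_init():
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF;
 *	q->dev = &pdev->dev;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(q);
 */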

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev: device for configuring DMA parameters
 * @size: size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from the drivers, which are known to
 * operate on platforms with IOMMU and provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
		return -ENODEV;
	}
	if (dma_get_max_seg_size(dev) < size)
		dma_set_max_seg_size(dev, size);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
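/*
 * Example (hypothetical probe() code, for illustration only): a driver on
 * an IOMMU-backed platform raises the segment size limit before setting
 * up its queue:
 *
 *	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	ret = vb2_queue_init(q);
 */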

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("DMA_BUF");