/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

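/*
 * Per-buffer state for this allocator. Depending on the buffer's memory
 * type, @vaddr comes from vmalloc_user() (MMAP), vm_map_ram()/ioremap()
 * (USERPTR) or dma_buf_vmap_unlocked() (DMABUF); @vec holds the pinned
 * user pages of a USERPTR buffer and @dbuf the imported DMABUF.
 */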
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

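/*
 * MMAP mode: allocate the buffer with vmalloc_user(), which returns
 * zeroed, page-aligned memory that can later be mapped to userspace
 * with remap_vmalloc_range() in vb2_vmalloc_mmap().
 */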
static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
			       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}

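/*
 * Drop one reference; the buffer is freed once the last user (the vb2
 * core or an mmap()ed vm_area tracked by the handler) is gone.
 */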
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

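/*
 * USERPTR mode: pin the user pages behind @vaddr in a frame vector and
 * map them into the kernel with vm_map_ram(). If no page structs are
 * available for the pfns (e.g. a VM_PFNMAP mapping), fall back to
 * ioremap(), which requires the range to be physically contiguous.
 */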
static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				     unsigned long vaddr, unsigned long size)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = vb->vb2_queue->dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size,
				  buf->dma_dir == DMA_FROM_DEVICE ||
				  buf->dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

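/*
 * Undo vb2_vmalloc_get_userptr(): drop the kernel mapping, mark the
 * pages dirty if the device may have written to them, and unpin them.
 */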
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL) {
			pages = frame_vector_pages(buf->vec);
			if (!WARN_ON_ONCE(IS_ERR(pages)))
				for (i = 0; i < n_pages; i++)
					set_page_dirty_lock(pages[i]);
		}
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

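/* Return the kernel virtual address of the plane, if one exists. */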
static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

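/* Report how many users still hold a reference to the buffer. */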
static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	return refcount_read(&buf->refcount);
}

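/*
 * MMAP mode: map the vmalloc()ed buffer into the calling process and
 * hook up the common vm_operations so that the mapping holds a buffer
 * reference for its whole lifetime.
 */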
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vm_flags_set(vma, VM_DONTEXPAND);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

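/*
 * Each device attached to the exported dma-buf gets its own scatterlist
 * and mapping state, kept in the attachment.
 */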
struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

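/*
 * Exporter attach: build a scatterlist with one entry per page of the
 * vmalloc area, using vmalloc_to_page() to translate kernel virtual
 * addresses to pages. The table is only mapped for the device later,
 * in vb2_vmalloc_dmabuf_ops_map().
 */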
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
					 struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sgtable_sg(sgt, sg, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

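/*
 * Exporter detach: unmap the scatterlist if it is still mapped and free
 * the per-attachment state.
 */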
static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
					  struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

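/*
 * Map the cached scatterlist for DMA. A map request with an unchanged
 * direction returns the cached table as-is; a changed direction forces
 * an unmap/remap cycle.
 */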
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

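/*
 * The buffer already lives in the kernel's vmalloc space, so vmap just
 * hands out the existing virtual address.
 */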
static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf,
				       struct iosys_map *map)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	iosys_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				       struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

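/*
 * Export an MMAP buffer as a dma-buf. The dma-buf takes its own
 * reference on the vb2 buffer, dropped in
 * vb2_vmalloc_dmabuf_ops_release().
 */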
static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
					      void *buf_priv,
					      unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */


/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

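/*
 * Importer side: ask the exporter for a kernel mapping of the attached
 * dma-buf. This allocator accesses buffers through the CPU only, so a
 * virtual address is all it needs.
 */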
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap_unlocked(buf->dbuf, &map);
	if (ret)
		return -EFAULT;
	buf->vaddr = map.vaddr;

	return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	dma_buf_vunmap_unlocked(buf->dbuf, &map);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (buf->vaddr)
		dma_buf_vunmap_unlocked(buf->dbuf, &map);

	kfree(buf);
}

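/*
 * Importer attach: only record the dma-buf and the requested size; the
 * kernel mapping is created on demand in vb2_vmalloc_map_dmabuf().
 */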
static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
				       struct device *dev,
				       struct dma_buf *dbuf,
				       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;

	return buf;
}

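/*
 * Allocator entry points, plugged into the vb2 core. A driver selects
 * this allocator from its queue setup code, for example (illustrative
 * snippet, not part of this file):
 *
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 */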
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);