/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

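/*
 * Per-buffer state kept by this allocator. (Descriptive summary added for
 * clarity; field meanings are inferred from the code below.)
 *
 * @vaddr:    kernel virtual address of the buffer: a vmalloc area, a
 *            vm_map_ram() mapping, an ioremap() of contiguous PFNs, or a
 *            dma-buf vmap
 * @vec:      pinned user pages for USERPTR buffers
 * @dma_dir:  DMA direction the buffer was created for
 * @size:     buffer size in bytes
 * @refcount: reference count; userspace mappings and dma-buf exports each
 *            hold one reference
 * @handler:  common vm_operations refcounting helper
 * @dbuf:     the attached dma-buf for imported buffers
 */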
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

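/*
 * MMAP support: allocate a zeroed, mmap-able buffer with vmalloc_user().
 * The refcount starts at 1 and is dropped in vb2_vmalloc_put() once all
 * userspace mappings and dma-buf exports are gone.
 */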
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %lu failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}

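/* Drop one reference; free the vmalloc area and bookkeeping on the last put. */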
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

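/*
 * USERPTR support: pin the user pages behind [vaddr, vaddr + size) in a
 * frame vector. If struct page pointers are available, the pages are
 * mapped into the kernel with vm_map_ram(); otherwise (e.g. a PFNMAP
 * region) the range must be physically contiguous and is ioremap()ed.
 */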
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check that the
		 * memory is physically contiguous and, if so, use a direct
		 * mapping instead.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

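/*
 * Undo vb2_vmalloc_get_userptr(): unmap the kernel mapping, mark the pages
 * dirty if the device may have written to them, and unpin the pages.
 */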
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

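/*
 * Map the whole vmalloc buffer into a userspace VMA and hook up the common
 * vm_operations so that each mapping holds a reference on the buffer.
 */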
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for two buffers won't be merged together.
	 */
	vma->vm_flags		|= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

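/*
 * Per-attachment state: a scatterlist describing the vmalloc pages, plus
 * the direction it is currently DMA-mapped for (DMA_NONE when unmapped).
 */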
struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

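/*
 * Build a scatterlist covering the buffer, one entry per vmalloc page.
 * The table is only DMA-mapped later, in vb2_vmalloc_dmabuf_ops_map().
 */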
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sgtable_sg(sgt, sg, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

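/*
 * DMA-map the cached scatterlist for the importer. The mapping is cached
 * per attachment: a request in the same direction reuses it, while a new
 * direction forces an unmap/remap. The dmabuf mutex serializes map/unmap.
 */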
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here; the unmap happens in detach */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	dma_buf_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

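/*
 * Export the buffer as a dma-buf. The dma-buf holds its own reference,
 * dropped in vb2_vmalloc_dmabuf_ops_release(), so the vmalloc area stays
 * alive until the last importer closes the dma-buf.
 */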
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */


/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

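/*
 * Importer side: get a kernel mapping of an attached dma-buf so that the
 * CPU (and vb2_vmalloc_vaddr()) can access it.
 */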
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct dma_buf_map map;
	int ret;

	ret = dma_buf_vmap(buf->dbuf, &map);
	if (ret)
		return -EFAULT;
	buf->vaddr = map.vaddr;

	return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

	dma_buf_vunmap(buf->dbuf, &map);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, &map);

	kfree(buf);
}

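/*
 * Attach an imported dma-buf: just record the dma-buf, direction and size.
 * The dma-buf must be at least as large as the vb2 plane it backs.
 */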
static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}


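/*
 * Illustrative sketch only (not part of this allocator): a driver selects
 * this allocator by pointing its vb2_queue at vb2_vmalloc_memops before
 * calling vb2_queue_init(). The function and queue setup below are
 * hypothetical; q->ops, q->drv_priv, locking etc. are omitted for brevity.
 */
#if 0
static int my_driver_init_vb2_queue(struct vb2_queue *q)	/* hypothetical */
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	q->mem_ops = &vb2_vmalloc_memops;	/* exported below */
	q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;

	return vb2_queue_init(q);
}
#endif
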
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");