xref: /linux/drivers/gpu/drm/exynos/exynos_drm_gem.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /* exynos_drm_gem.c
2  *
3  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4  * Author: Inki Dae <inki.dae@samsung.com>
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23  * OTHER DEALINGS IN THE SOFTWARE.
24  */
25 
26 #include "drmP.h"
27 #include "drm.h"
28 
29 #include <drm/exynos_drm.h>
30 
31 #include "exynos_drm_drv.h"
32 #include "exynos_drm_gem.h"
33 #include "exynos_drm_buf.h"
34 
35 static unsigned int convert_to_vm_err_msg(int msg)
36 {
37 	unsigned int out_msg;
38 
39 	switch (msg) {
40 	case 0:
41 	case -ERESTARTSYS:
42 	case -EINTR:
43 		out_msg = VM_FAULT_NOPAGE;
44 		break;
45 
46 	case -ENOMEM:
47 		out_msg = VM_FAULT_OOM;
48 		break;
49 
50 	default:
51 		out_msg = VM_FAULT_SIGBUS;
52 		break;
53 	}
54 
55 	return out_msg;
56 }
57 
58 static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
59 {
60 	DRM_DEBUG_KMS("%s\n", __FILE__);
61 
62 	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
63 }
64 
65 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
66 		struct drm_device *dev, unsigned int size,
67 		unsigned int *handle)
68 {
69 	struct exynos_drm_gem_obj *exynos_gem_obj;
70 	struct exynos_drm_buf_entry *entry;
71 	struct drm_gem_object *obj;
72 	int ret;
73 
74 	DRM_DEBUG_KMS("%s\n", __FILE__);
75 
76 	size = roundup(size, PAGE_SIZE);
77 
78 	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
79 	if (!exynos_gem_obj) {
80 		DRM_ERROR("failed to allocate exynos gem object.\n");
81 		return ERR_PTR(-ENOMEM);
82 	}
83 
84 	/* allocate the new buffer object and memory region. */
85 	entry = exynos_drm_buf_create(dev, size);
86 	if (!entry) {
87 		kfree(exynos_gem_obj);
88 		return ERR_PTR(-ENOMEM);
89 	}
90 
91 	exynos_gem_obj->entry = entry;
92 
93 	obj = &exynos_gem_obj->base;
94 
95 	ret = drm_gem_object_init(dev, obj, size);
96 	if (ret < 0) {
97 		DRM_ERROR("failed to initailize gem object.\n");
98 		goto err_obj_init;
99 	}
100 
101 	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
102 
103 	ret = drm_gem_create_mmap_offset(obj);
104 	if (ret < 0) {
105 		DRM_ERROR("failed to allocate mmap offset.\n");
106 		goto err_create_mmap_offset;
107 	}
108 
109 	/*
110 	 * allocate a id of idr table where the obj is registered
111 	 * and handle has the id what user can see.
112 	 */
113 	ret = drm_gem_handle_create(file_priv, obj, handle);
114 	if (ret)
115 		goto err_handle_create;
116 
117 	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
118 
119 	/* drop reference from allocate - handle holds it now. */
120 	drm_gem_object_unreference_unlocked(obj);
121 
122 	return exynos_gem_obj;
123 
124 err_handle_create:
125 	drm_gem_free_mmap_offset(obj);
126 
127 err_create_mmap_offset:
128 	drm_gem_object_release(obj);
129 
130 err_obj_init:
131 	exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
132 
133 	kfree(exynos_gem_obj);
134 
135 	return ERR_PTR(ret);
136 }
137 
138 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
139 		struct drm_file *file_priv)
140 {
141 	struct drm_exynos_gem_create *args = data;
142 	struct exynos_drm_gem_obj *exynos_gem_obj;
143 
144 	DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size);
145 
146 	exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
147 			&args->handle);
148 	if (IS_ERR(exynos_gem_obj))
149 		return PTR_ERR(exynos_gem_obj);
150 
151 	return 0;
152 }
153 
154 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
155 		struct drm_file *file_priv)
156 {
157 	struct drm_exynos_gem_map_off *args = data;
158 
159 	DRM_DEBUG_KMS("%s\n", __FILE__);
160 
161 	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
162 			args->handle, (unsigned long)args->offset);
163 
164 	if (!(dev->driver->driver_features & DRIVER_GEM)) {
165 		DRM_ERROR("does not support GEM.\n");
166 		return -ENODEV;
167 	}
168 
169 	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
170 			&args->offset);
171 }
172 
173 static int exynos_drm_gem_mmap_buffer(struct file *filp,
174 		struct vm_area_struct *vma)
175 {
176 	struct drm_gem_object *obj = filp->private_data;
177 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
178 	struct exynos_drm_buf_entry *entry;
179 	unsigned long pfn, vm_size;
180 
181 	DRM_DEBUG_KMS("%s\n", __FILE__);
182 
183 	vma->vm_flags |= (VM_IO | VM_RESERVED);
184 
185 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
186 	vma->vm_file = filp;
187 
188 	vm_size = vma->vm_end - vma->vm_start;
189 	/*
190 	 * a entry contains information to physically continuous memory
191 	 * allocated by user request or at framebuffer creation.
192 	 */
193 	entry = exynos_gem_obj->entry;
194 
195 	/* check if user-requested size is valid. */
196 	if (vm_size > entry->size)
197 		return -EINVAL;
198 
199 	/*
200 	 * get page frame number to physical memory to be mapped
201 	 * to user space.
202 	 */
203 	pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT;
204 
205 	DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
206 
207 	if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
208 				vma->vm_page_prot)) {
209 		DRM_ERROR("failed to remap pfn range.\n");
210 		return -EAGAIN;
211 	}
212 
213 	return 0;
214 }
215 
/*
 * File operations swapped onto the gem object's file by
 * exynos_drm_gem_mmap_ioctl() so that the subsequent do_mmap() call
 * lands in exynos_drm_gem_mmap_buffer() above.
 */
static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};
219 
220 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
221 		struct drm_file *file_priv)
222 {
223 	struct drm_exynos_gem_mmap *args = data;
224 	struct drm_gem_object *obj;
225 	unsigned int addr;
226 
227 	DRM_DEBUG_KMS("%s\n", __FILE__);
228 
229 	if (!(dev->driver->driver_features & DRIVER_GEM)) {
230 		DRM_ERROR("does not support GEM.\n");
231 		return -ENODEV;
232 	}
233 
234 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
235 	if (!obj) {
236 		DRM_ERROR("failed to lookup gem object.\n");
237 		return -EINVAL;
238 	}
239 
240 	obj->filp->f_op = &exynos_drm_gem_fops;
241 	obj->filp->private_data = obj;
242 
243 	down_write(&current->mm->mmap_sem);
244 	addr = do_mmap(obj->filp, 0, args->size,
245 			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
246 	up_write(&current->mm->mmap_sem);
247 
248 	drm_gem_object_unreference_unlocked(obj);
249 
250 	if (IS_ERR((void *)addr))
251 		return PTR_ERR((void *)addr);
252 
253 	args->mapped = addr;
254 
255 	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
256 
257 	return 0;
258 }
259 
/* GEM init callback: this driver needs no per-object setup here. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}
266 
267 void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
268 {
269 	struct exynos_drm_gem_obj *exynos_gem_obj;
270 
271 	DRM_DEBUG_KMS("%s\n", __FILE__);
272 
273 	DRM_DEBUG_KMS("handle count = %d\n",
274 			atomic_read(&gem_obj->handle_count));
275 
276 	if (gem_obj->map_list.map)
277 		drm_gem_free_mmap_offset(gem_obj);
278 
279 	/* release file pointer to gem object. */
280 	drm_gem_object_release(gem_obj);
281 
282 	exynos_gem_obj = to_exynos_gem_obj(gem_obj);
283 
284 	exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry);
285 
286 	kfree(exynos_gem_obj);
287 }
288 
289 int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
290 		struct drm_device *dev, struct drm_mode_create_dumb *args)
291 {
292 	struct exynos_drm_gem_obj *exynos_gem_obj;
293 
294 	DRM_DEBUG_KMS("%s\n", __FILE__);
295 
296 	/*
297 	 * alocate memory to be used for framebuffer.
298 	 * - this callback would be called by user application
299 	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
300 	 */
301 
302 	args->pitch = args->width * args->bpp >> 3;
303 	args->size = args->pitch * args->height;
304 
305 	exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
306 							&args->handle);
307 	if (IS_ERR(exynos_gem_obj))
308 		return PTR_ERR(exynos_gem_obj);
309 
310 	return 0;
311 }
312 
313 int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
314 		struct drm_device *dev, uint32_t handle, uint64_t *offset)
315 {
316 	struct exynos_drm_gem_obj *exynos_gem_obj;
317 	struct drm_gem_object *obj;
318 
319 	DRM_DEBUG_KMS("%s\n", __FILE__);
320 
321 	mutex_lock(&dev->struct_mutex);
322 
323 	/*
324 	 * get offset of memory allocated for drm framebuffer.
325 	 * - this callback would be called by user application
326 	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
327 	 */
328 
329 	obj = drm_gem_object_lookup(dev, file_priv, handle);
330 	if (!obj) {
331 		DRM_ERROR("failed to lookup gem object.\n");
332 		mutex_unlock(&dev->struct_mutex);
333 		return -EINVAL;
334 	}
335 
336 	exynos_gem_obj = to_exynos_gem_obj(obj);
337 
338 	*offset = get_gem_mmap_offset(&exynos_gem_obj->base);
339 
340 	drm_gem_object_unreference(obj);
341 
342 	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
343 
344 	mutex_unlock(&dev->struct_mutex);
345 
346 	return 0;
347 }
348 
349 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
350 {
351 	struct drm_gem_object *obj = vma->vm_private_data;
352 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
353 	struct drm_device *dev = obj->dev;
354 	unsigned long pfn;
355 	pgoff_t page_offset;
356 	int ret;
357 
358 	page_offset = ((unsigned long)vmf->virtual_address -
359 			vma->vm_start) >> PAGE_SHIFT;
360 
361 	mutex_lock(&dev->struct_mutex);
362 
363 	pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset;
364 
365 	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
366 
367 	mutex_unlock(&dev->struct_mutex);
368 
369 	return convert_to_vm_err_msg(ret);
370 }
371 
372 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
373 {
374 	int ret;
375 
376 	DRM_DEBUG_KMS("%s\n", __FILE__);
377 
378 	/* set vm_area_struct. */
379 	ret = drm_gem_mmap(filp, vma);
380 	if (ret < 0) {
381 		DRM_ERROR("failed to mmap.\n");
382 		return ret;
383 	}
384 
385 	vma->vm_flags &= ~VM_PFNMAP;
386 	vma->vm_flags |= VM_MIXEDMAP;
387 
388 	return ret;
389 }
390 
391 
/*
 * Dumb-buffer destroy callback: drop the userspace handle for @handle.
 */
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
		struct drm_device *dev, unsigned int handle)
{
	int err;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * deleting the handle decrements obj->handle_count and
	 * obj->refcount; once both hit zero the core calls back into
	 * exynos_drm_gem_free_object() to release all resources.
	 */
	err = drm_gem_handle_delete(file_priv, handle);
	if (err < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return err;
	}

	return 0;
}
412 
/* module metadata consumed by modinfo and the module loader. */
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM GEM Module");
MODULE_LICENSE("GPL");
416