xref: /linux/drivers/gpu/drm/armada/armada_gem.c (revision dc0d1c4519095a6c6bbd9ec4a808674aba502741)
/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

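/*
 * Fault handler for mmap'ed GEM objects.  Objects mapped this way are
 * physically contiguous (page or linear backed), so the faulting page
 * frame number can be derived by simple offsetting from the object's
 * base physical address.
 */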
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

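	/*
	 * Lockdep annotation: linear_lock may be taken below even though
	 * this object may not be linear-backed; assert it here so lock
	 * ordering problems are caught on every free path.
	 */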
	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
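	/* A 64x32 ARGB cursor is 64 * 32 * 4 = 8192 bytes, hence the cap. */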
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */
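	/*
	 * That is, given
	 *
	 *	void *cpu = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	 *
	 * neither virt_to_phys(cpu) nor the returned dma address can be
	 * portably trusted to be the physical address that the scanout
	 * hardware needs.
	 */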

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
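		/* Request an alignment equal to the object size, capped
		 * at 2MiB so large buffers don't over-constrain the
		 * linear pool. */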
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

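		/*
		 * The display engine is programmed with this address
		 * directly, so for linear objects the device address and
		 * the physical address coincide.
		 */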
		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

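/*
 * Allocate a GEM object with no shmem backing store.  The caller is
 * expected to attach backing later: a linear region, system pages, or
 * an imported dma-buf.
 */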
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

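	/* Back the object with highmem-capable, reclaimable shmem pages. */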
	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

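	/*
	 * Dumb buffers may be scanned out, so they are backed by the
	 * physically contiguous linear pool rather than shmem.
	 */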
	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

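	/* Only shmem-backed objects have a file to map through. */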
	if (!dobj->obj.filp) {
		drm_gem_object_put_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

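	/* Validate and prefault the user buffer before looking up the
	 * object, so the common failure cases take no references. */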
	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
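/*
 * Build a scatterlist for an attached importer.  Shmem objects get one
 * entry per page, pinned via shmem_read_mapping_page(); page and linear
 * objects are contiguous and need only a single entry.  Linear regions
 * have no struct page, so their DMA address is filled in directly and
 * never passed through dma_map_sg().
 */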
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

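/*
 * CPU access to exported dma-bufs is deliberately unsupported: kmap
 * returns NULL and mmap fails with -EINVAL.
 */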
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map		= armada_gem_dmabuf_no_kmap,
	.unmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;

		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

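/*
 * Map an imported dma-buf for display.  Scanout requires a single
 * contiguous region covering the whole object, so scattered or short
 * mappings are rejected.
 */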
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}
564