xref: /linux/drivers/gpu/drm/armada/armada_gem.c (revision 1517d90cfafe0f95fd7863d04e1596f7beb7dfa8)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>

#include <drm/armada_drm.h>
#include <drm/drm_prime.h>

#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_ioctlP.h"

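/*
 * The fault handler assumes the object is backed by physically contiguous
 * memory: it derives the PFN from obj->phys_addr (set for page- and
 * linear-backed objects) plus the page offset into the VMA and inserts it
 * directly, so no struct page is required.
 */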
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

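/*
 * Tear down whichever backing store the object ended up with: a contiguous
 * alloc_pages() allocation, a node in the linear region (plus any kernel
 * mapping of it), or an imported dma-buf attachment whose scatterlist must
 * be unmapped before the attachment is dropped.
 */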
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

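/*
 * Attach physically contiguous backing to the object.  Small objects
 * (8KiB or less, typically cursors) are taken from the page allocator;
 * everything else comes from the linear region managed in priv->linear,
 * and is zeroed through a temporary write-combining mapping before being
 * handed out.
 */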
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (e.g., as it would be on ARM.)  This means virt_to_phys() on
	 * the returned virtual address may be invalid, depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

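/*
 * Return a kernel virtual mapping of the object, creating a
 * write-combining ioremap on first use for linear-backed objects.
 * Page-backed objects already have obj->addr set at allocation time.
 */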
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

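/*
 * Allocate a GEM object with no shmem backing store of its own.  These
 * are used for dumb buffers (which get linear backing later) and for
 * dma-buf imports, where the pages belong to the exporter.
 */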
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

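/*
 * Allocate a shmem-backed GEM object.  The backing pages are allocated
 * lazily from highmem-capable, reclaimable memory and are pinned by the
 * prime export path when the object is shared.
 */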
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
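/*
 * Dumb buffers are intended for scanout, so they are created as private
 * objects and immediately given contiguous backing via
 * armada_gem_linear_back() rather than shmem pages.
 */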
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
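/*
 * GEM_CREATE allocates a shmem-backed object and returns a handle.
 * Unlike dumb buffers, no contiguous backing is attached here; the
 * object can be mapped into userspace with the mmap ioctl below.
 */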
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
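/*
 * Only objects with a shmem file can be mapped this way; linear or
 * imported objects have no filp and are rejected with -EINVAL.  The
 * caller-supplied offset and size are passed straight to vm_mmap() on
 * the shmem file; no additional validation happens here.
 */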
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

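/*
 * Copy user data straight into the kernel mapping of an object.  This
 * only works for objects that have obj->addr set (i.e. objects with a
 * permanent kernel mapping), and it invokes the object's update
 * callback, if one is registered, so the owner can react to the new
 * contents.
 */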
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
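/*
 * The map_dma_buf implementation below handles the three backing types:
 * shmem objects get one scatterlist entry per page (each page pinned via
 * shmem_read_mapping_page()), page-backed objects get a single entry for
 * their contiguous allocation, and linear objects have no struct pages at
 * all, so the DMA address and length are filled in directly without
 * calling dma_map_sg().
 */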
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map		= armada_gem_dmabuf_no_kmap,
	.unmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

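/*
 * Export uses the driver's own dma_buf_ops so that re-imports of our own
 * buffers can be recognised in armada_gem_prime_import() by comparing
 * buf->ops against armada_gem_prime_dmabuf_ops.
 */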
struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

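/*
 * Importing wraps the attachment in a private GEM object; importing one
 * of our own buffers just takes another reference on the existing object.
 * Mapping of the exporter's scatterlist is deferred to
 * armada_gem_map_import(), called when the buffer is actually used.
 */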
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;

		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

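/*
 * Map the imported dma-buf for DMA.  The rest of the driver expects a
 * single contiguous buffer, so a mapping that comes back as more than one
 * segment, or one smaller than the object, is rejected here.
 */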
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}
565