// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>

#include <drm/armada_drm.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_ioctlP.h"

MODULE_IMPORT_NS("DMA_BUF");

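/*
 * Fault handler for mmap'ed objects: all armada GEM objects are
 * physically contiguous, so the faulting page's PFN is simply the
 * object's base PFN plus the page offset into the VMA.
 */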
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct armada_gem_vm_ops = {
	.fault = armada_gem_vm_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

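/* All GEM objects are sized in whole pages. */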
static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

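/*
 * Release an object: undo whichever backing store it has (order-N page
 * block, linear region, or imported dma-buf) before freeing the object.
 */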
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = drm_to_armada_dev(obj->dev);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);

		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment_unlocked(dobj->obj.import_attach,
							  dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

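/*
 * Provide backing store for an object: small objects (typically cursors)
 * come from an order-N system page allocation; everything else is carved
 * out of the device's linear (physically contiguous) memory pool.
 */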
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = drm_to_armada_dev(dev);
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from DMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * GEM DMA helper interface uses dma_alloc_coherent(), which provides
	 * us with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address may be invalid, depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes it unsafe to re-use the device address
	 * as a physical address too.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

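/*
 * Map an object for kernel (CPU) access. Only linear objects need an
 * explicit ioremap here; page-backed objects already have a kernel
 * virtual address from page_address().
 */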
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

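/* Common object operations for both shmem- and linear-backed objects. */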
static const struct drm_gem_object_funcs armada_gem_object_funcs = {
	.free = armada_gem_free_object,
	.export = armada_gem_prime_export,
	.vm_ops = &armada_gem_vm_ops,
};

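/*
 * Allocate a GEM object with no shmem backing; the caller provides the
 * backing store later (linear memory or an imported dma-buf).
 */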
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

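/*
 * Allocate a shmem-backed GEM object; the mapping's gfp mask allows
 * highmem, reclaimable pages for the backing store.
 */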
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
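/*
 * Dumb buffers are used for scanout, so they are backed from the linear
 * pool and handed back to userspace as a GEM handle.
 */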
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
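/*
 * Create a shmem-backed object. Userspace reaches this through
 * DRM_IOCTL_ARMADA_GEM_CREATE; a minimal (illustrative, error handling
 * elided) caller looks roughly like:
 *
 *	struct drm_armada_gem_create create = { .size = len };
 *
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &create);
 *	// create.handle now names the object
 */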
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
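/*
 * Only shmem-backed objects (those with a backing struct file) can be
 * mapped here; linear objects go through the fault handler instead.
 */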
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

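/*
 * Copy data from userspace into a kernel-mapped object. The user buffer
 * is validated and faulted in up front, and the offset/size pair is
 * checked for overflow against the object size before copying.
 */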
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret = 0;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	if (fault_in_readable(ptr, args->size))
		return -EFAULT;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Prime support */
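/*
 * Build a scatterlist for an attached importer. Three backing cases:
 * shmem objects pin their pages and DMA-map them; single-page objects
 * map their one page block; linear objects have no struct pages at all,
 * so the DMA address and length are filled in directly without mapping.
 */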
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sgtable_sg(sgt, sg, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				goto release;

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto release;
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sgtable_sg(sgt, sg, i)
		if (sg_page(sg))
			put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

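/*
 * Tear down a mapping created above: linear objects were never DMA
 * mapped, and only shmem objects hold page references to drop.
 */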
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sgtable_sg(sgt, sg, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

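/* mmap of exported armada dma-bufs is not supported. */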
static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf = armada_gem_prime_map_dma_buf,
	.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = armada_gem_dmabuf_mmap,
};

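/*
 * Export an object as a dma-buf using our own dma_buf_ops, so imports
 * of our own buffers can be recognised and short-circuited below.
 */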
struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

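/*
 * Import a dma-buf. Self-imports just take another reference on the
 * underlying GEM object; foreign buffers are attached but deliberately
 * not mapped until the object is actually used.
 */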
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;

		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

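/*
 * Map a previously imported dma-buf for DMA. The display hardware needs
 * one contiguous region, so a scattered mapping or one smaller than the
 * object is rejected.
 */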
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment_unlocked(dobj->obj.import_attach,
						    DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}
