// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drm gem DMA helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: dma helpers
 *
 * The DRM GEM/DMA helpers are a means to provide buffer objects that are
 * presented to the device as a contiguous chunk of memory. This is useful
 * for devices that do not support scatter-gather DMA (either directly or
 * by using an intimately attached IOMMU).
 *
 * For devices that access the memory bus through an (external) IOMMU, the
 * buffer objects are allocated using a traditional page-based allocator
 * and may be scattered through physical memory. However, they are
 * contiguous in the IOVA space, so they appear contiguous to the devices
 * using them.
 *
 * For other devices, the helpers rely on CMA to provide buffer objects
 * that are physically contiguous in memory.
 *
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_dma_object_vmap() wraps
 * drm_gem_dma_vmap()). These helpers perform the necessary type conversion.
 */
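
/*
 * Hedged usage sketch (not part of this file; the driver name is made up):
 * most drivers pull in these helpers through the DRM_GEM_DMA_DRIVER_OPS
 * macro, which wires up the dumb-buffer and PRIME import callbacks
 * implemented below:
 *
 *	static const struct drm_driver hypothetical_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_DMA_DRIVER_OPS,
 *	};
 */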

static const struct drm_gem_object_funcs drm_gem_dma_default_funcs = {
        .free = drm_gem_dma_object_free,
        .print_info = drm_gem_dma_object_print_info,
        .get_sg_table = drm_gem_dma_object_get_sg_table,
        .vmap = drm_gem_dma_object_vmap,
        .mmap = drm_gem_dma_object_mmap,
        .vm_ops = &drm_gem_dma_vm_ops,
};

/**
 * __drm_gem_dma_create - Create a GEM DMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 * @private: true if used for internal purposes
 *
 * This function creates and initializes a GEM DMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_dma_object *
__drm_gem_dma_create(struct drm_device *drm, size_t size, bool private)
{
        struct drm_gem_dma_object *dma_obj;
        struct drm_gem_object *gem_obj;
        int ret = 0;

        if (drm->driver->gem_create_object) {
                gem_obj = drm->driver->gem_create_object(drm, size);
                if (IS_ERR(gem_obj))
                        return ERR_CAST(gem_obj);
                dma_obj = to_drm_gem_dma_obj(gem_obj);
        } else {
                dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
                if (!dma_obj)
                        return ERR_PTR(-ENOMEM);
                gem_obj = &dma_obj->base;
        }

        if (!gem_obj->funcs)
                gem_obj->funcs = &drm_gem_dma_default_funcs;

        if (private) {
                drm_gem_private_object_init(drm, gem_obj, size);

                /* Always use writecombine for dma-buf mappings */
                dma_obj->map_noncoherent = false;
        } else {
                ret = drm_gem_object_init(drm, gem_obj, size);
        }
        if (ret)
                goto error;

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret) {
                drm_gem_object_release(gem_obj);
                goto error;
        }

        return dma_obj;

error:
        kfree(dma_obj);
        return ERR_PTR(ret);
}

/**
 * drm_gem_dma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a DMA GEM object and allocates memory as backing store.
 * The allocated memory will occupy a contiguous chunk of bus address space.
 *
 * For devices that are directly connected to the memory bus, the allocated
 * memory will be physically contiguous. For devices that access memory through
 * an IOMMU, the allocated memory is not expected to be physically contiguous,
 * because having contiguous IOVAs is sufficient to meet a device's DMA
 * requirements.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
                                              size_t size)
{
        struct drm_gem_dma_object *dma_obj;
        int ret;

        size = round_up(size, PAGE_SIZE);

        dma_obj = __drm_gem_dma_create(drm, size, false);
        if (IS_ERR(dma_obj))
                return dma_obj;

        if (dma_obj->map_noncoherent) {
                dma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
                                                       &dma_obj->dma_addr,
                                                       DMA_TO_DEVICE,
                                                       GFP_KERNEL | __GFP_NOWARN);
        } else {
                dma_obj->vaddr = dma_alloc_wc(drm->dev, size,
                                              &dma_obj->dma_addr,
                                              GFP_KERNEL | __GFP_NOWARN);
        }
        if (!dma_obj->vaddr) {
                drm_dbg(drm, "failed to allocate buffer with size %zu\n",
                        size);
                ret = -ENOMEM;
                goto error;
        }

        return dma_obj;

error:
        drm_gem_object_put(&dma_obj->base);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_create);
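
/*
 * Usage sketch (illustrative; width, height and cpp are assumed driver
 * variables): allocating a contiguous scanout buffer. The size is rounded
 * up to PAGE_SIZE internally, and the object is released with
 * drm_gem_object_put() when no longer needed:
 *
 *	struct drm_gem_dma_object *dma_obj;
 *
 *	dma_obj = drm_gem_dma_create(drm, width * height * cpp);
 *	if (IS_ERR(dma_obj))
 *		return PTR_ERR(dma_obj);
 */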

/**
 * drm_gem_dma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a DMA GEM object, allocating a chunk of memory as
 * backing store. The GEM object is then added to the list of objects associated
 * with the given file and a handle to it is returned.
 *
 * The allocated memory will occupy a contiguous chunk of bus address space.
 * See drm_gem_dma_create() for more details.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_dma_object *
drm_gem_dma_create_with_handle(struct drm_file *file_priv,
                               struct drm_device *drm, size_t size,
                               uint32_t *handle)
{
        struct drm_gem_dma_object *dma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        dma_obj = drm_gem_dma_create(drm, size);
        if (IS_ERR(dma_obj))
                return dma_obj;

        gem_obj = &dma_obj->base;

        /*
         * Allocate an ID in the IDR table where the object is registered;
         * the returned handle holds the ID that userspace uses to refer to
         * the object.
         */
        ret = drm_gem_handle_create(file_priv, gem_obj, handle);
        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_put(gem_obj);
        if (ret)
                return ERR_PTR(ret);

        return dma_obj;
}

/**
 * drm_gem_dma_free - free resources associated with a DMA GEM object
 * @dma_obj: DMA GEM object to free
 *
 * This function frees the backing memory of the DMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 */
void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
{
        struct drm_gem_object *gem_obj = &dma_obj->base;
        struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);

        if (drm_gem_is_imported(gem_obj)) {
                if (dma_obj->vaddr)
                        dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
                drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
        } else if (dma_obj->vaddr) {
                if (dma_obj->map_noncoherent)
                        dma_free_noncoherent(gem_obj->dev->dev, dma_obj->base.size,
                                             dma_obj->vaddr, dma_obj->dma_addr,
                                             DMA_TO_DEVICE);
                else
                        dma_free_wc(gem_obj->dev->dev, dma_obj->base.size,
                                    dma_obj->vaddr, dma_obj->dma_addr);
        }

        drm_gem_object_release(gem_obj);

        kfree(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_free);

/**
 * drm_gem_dma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as a driver's &drm_driver.dumb_create callback.
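 *
 * Example (a hedged sketch; the 64-byte pitch alignment and function name
 * are made up for illustration)::
 *
 *     static int driver_dumb_create(struct drm_file *file_priv,
 *                                   struct drm_device *drm,
 *                                   struct drm_mode_create_dumb *args)
 *     {
 *         args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *         return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
 *     }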
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
                                     struct drm_device *drm,
                                     struct drm_mode_create_dumb *args)
{
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct drm_gem_dma_object *dma_obj;

        if (args->pitch < min_pitch)
                args->pitch = min_pitch;

        if (args->size < args->pitch * args->height)
                args->size = args->pitch * args->height;

        dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create_internal);

/**
 * drm_gem_dma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_dma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_dumb_create(struct drm_file *file_priv,
                            struct drm_device *drm,
                            struct drm_mode_create_dumb *args)
{
        struct drm_gem_dma_object *dma_obj;
        int ret;

        ret = drm_mode_size_dumb(drm, args, 0, 0);
        if (ret)
                return ret;

        dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create);
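
/*
 * A custom wrapper around drm_gem_dma_dumb_create_internal(), such as the
 * sketch in its kernel-doc above, is typically hooked up through the
 * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() macro instead of the plain
 * DRM_GEM_DMA_DRIVER_OPS, e.g. (function name is illustrative):
 *
 *	DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(driver_dumb_create)
 */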

const struct vm_operations_struct drm_gem_dma_vm_ops = {
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_dma_vm_ops);

#ifndef CONFIG_MMU
/**
 * drm_gem_dma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used on noMMU platforms to propose an address mapping
 * for a given buffer. It's intended to be used as a direct handler for the
 * struct &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
                                            unsigned long addr,
                                            unsigned long len,
                                            unsigned long pgoff,
                                            unsigned long flags)
{
        struct drm_gem_dma_object *dma_obj;
        struct drm_gem_object *obj = NULL;
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_offset_node *node;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  pgoff,
                                                  len >> PAGE_SHIFT);
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }

        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put(obj);
                return -EACCES;
        }

        dma_obj = to_drm_gem_dma_obj(obj);

        drm_gem_object_put(obj);

        return dma_obj->vaddr ? (unsigned long)dma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_get_unmapped_area);
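
/*
 * Hookup sketch: drivers normally get this handler installed automatically
 * by defining their file operations through DEFINE_DRM_GEM_DMA_FOPS(),
 * which wires up drm_gem_dma_get_unmapped_area on noMMU configurations,
 * e.g. (the name is illustrative):
 *
 *	DEFINE_DRM_GEM_DMA_FOPS(hypothetical_fops);
 */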
#endif

/**
 * drm_gem_dma_print_info() - Print &drm_gem_dma_object info for debugfs
 * @dma_obj: DMA GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 *
 * This function prints dma_addr and vaddr for use in e.g. debugfs output.
 */
void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
                            struct drm_printer *p, unsigned int indent)
{
        drm_printf_indent(p, indent, "dma_addr=%pad\n", &dma_obj->dma_addr);
        drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_dma_print_info);

/**
 * drm_gem_dma_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a DMA GEM object
 * @dma_obj: DMA GEM object
 *
 * This function exports a scatter/gather table by calling the standard
 * DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an
 * ERR_PTR()-encoded negative error code on failure.
 */
struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj)
{
        struct drm_gem_object *obj = &dma_obj->base;
        struct sg_table *sgt;
        int ret;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        ret = dma_get_sgtable(obj->dev->dev, sgt, dma_obj->vaddr,
                              dma_obj->dma_addr, obj->size);
        if (ret < 0)
                goto out;

        return sgt;

out:
        kfree(sgt);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_get_sg_table);

/**
 * drm_gem_dma_prime_import_sg_table - produce a DMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the DMA helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
                                  struct dma_buf_attachment *attach,
                                  struct sg_table *sgt)
{
        struct drm_gem_dma_object *dma_obj;

        /* check if the entries in the sg_table are contiguous */
        if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
                return ERR_PTR(-EINVAL);

        /* Create a DMA GEM buffer. */
        dma_obj = __drm_gem_dma_create(dev, attach->dmabuf->size, true);
        if (IS_ERR(dma_obj))
                return ERR_CAST(dma_obj);

        dma_obj->dma_addr = sg_dma_address(sgt->sgl);
        dma_obj->sgt = sgt;

        drm_dbg_prime(dev, "dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
                      attach->dmabuf->size);

        return &dma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_prime_import_sg_table);

/**
 * drm_gem_dma_vmap - map a DMA GEM object into the kernel's virtual
 *     address space
 * @dma_obj: DMA GEM object
 * @map: Returns the kernel virtual address of the DMA GEM object's backing
 *       store.
 *
 * This function maps a buffer into the kernel's virtual address space.
 * Since the DMA buffers are already mapped into the kernel virtual address
 * space, this simply returns the cached virtual address.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
                     struct iosys_map *map)
{
        iosys_map_set_vaddr(map, dma_obj->vaddr);

        return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_vmap);

/**
 * drm_gem_dma_mmap - memory-map an exported DMA GEM object
 * @dma_obj: DMA GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer into a userspace process's address space.
 * In addition to the usual GEM VMA setup, it immediately faults in the entire
 * object instead of using on-demand faulting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = &dma_obj->base;
        int ret;

        /*
         * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
         * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
         * the whole buffer.
         */
        vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
        vm_flags_mod(vma, VM_DONTEXPAND, VM_PFNMAP);

        if (dma_obj->map_noncoherent) {
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

                ret = dma_mmap_pages(dma_obj->base.dev->dev,
                                     vma, vma->vm_end - vma->vm_start,
                                     virt_to_page(dma_obj->vaddr));
        } else {
                ret = dma_mmap_wc(dma_obj->base.dev->dev, vma, dma_obj->vaddr,
                                  dma_obj->dma_addr,
                                  vma->vm_end - vma->vm_start);
        }
        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_mmap);

/**
 * drm_gem_dma_prime_import_sg_table_vmap - PRIME import another driver's
 *     scatter/gather table and get the virtual address of the buffer
 * @dev: DRM device
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table using
 * drm_gem_dma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
 * virtual address. This ensures that a DMA GEM object always has its virtual
 * address set. This address is released when the object is freed.
 *
 * This function can be used as the &drm_driver.gem_prime_import_sg_table
 * callback. The &DRM_GEM_DMA_DRIVER_OPS_VMAP macro provides a shortcut to set
 * the necessary DRM driver operations.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
                                       struct dma_buf_attachment *attach,
                                       struct sg_table *sgt)
{
        struct drm_gem_dma_object *dma_obj;
        struct drm_gem_object *obj;
        struct iosys_map map;
        int ret;

        ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
        if (ret) {
                drm_err(dev, "Failed to vmap PRIME buffer\n");
                return ERR_PTR(ret);
        }

        obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj)) {
                dma_buf_vunmap_unlocked(attach->dmabuf, &map);
                return obj;
        }

        dma_obj = to_drm_gem_dma_obj(obj);
        dma_obj->vaddr = map.vaddr;

        return obj;
}
EXPORT_SYMBOL(drm_gem_dma_prime_import_sg_table_vmap);
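
/*
 * Sketch (the driver name is made up): drivers whose hardware requires a
 * kernel-virtual mapping of imported buffers select the _vmap variant of
 * the helper ops instead of plain DRM_GEM_DMA_DRIVER_OPS:
 *
 *	static const struct drm_driver hypothetical_vmap_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_DMA_DRIVER_OPS_VMAP,
 *	};
 */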

MODULE_DESCRIPTION("DRM DMA memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL");