// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS("DMA_BUF");

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For the GEM callback helpers used in struct &drm_gem_object_funcs, see the
 * likewise named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap()
 * wraps drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
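
/*
 * Usage sketch (hypothetical driver, not part of this file): a driver built
 * on these helpers usually only has to wire up the shmem callbacks. This
 * assumes the DRM_GEM_SHMEM_DRIVER_OPS convenience macro from
 * <drm/drm_gem_shmem_helper.h>, which points &drm_driver.dumb_create and the
 * PRIME import callback at the helpers in this file:
 *
 *	static const struct drm_driver sketch_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *		.name = "sketch",
 *	};
 */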

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
		       struct vfsmount *gemfs)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See the comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
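
/*
 * Usage sketch (hypothetical, error handling abbreviated): allocating a
 * buffer and dropping the reference again. The size is rounded up to a
 * multiple of PAGE_SIZE by __drm_gem_shmem_create():
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, 64 * 1024);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	... use shmem->base like any other GEM object, then drop the ref:
 *
 *	drm_gem_object_put(&shmem->base);
 */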

/**
 * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
 * given mountpoint
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @gemfs: tmpfs mount where the GEM object will be created
 *
 * This function creates a shmem GEM object in a given tmpfs mountpoint.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
							   size_t size,
							   struct vfsmount *gemfs)
{
	return __drm_gem_shmem_create(dev, size, false, gemfs);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, shmem->vmap_use_count);

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);

		drm_WARN_ON(obj->dev, shmem->pages_use_count);

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
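
/*
 * Usage sketch (hypothetical): keeping the backing pages resident around a
 * device access. drm_gem_shmem_pin() takes the reservation lock itself, so
 * it must not be called with shmem->base.resv already held; use
 * drm_gem_shmem_pin_locked() in that case:
 *
 *	ret = drm_gem_shmem_pin(shmem);
 *	if (ret)
 *		return ret;
 *	... hardware uses the buffer ...
 *	drm_gem_shmem_unpin(shmem);
 */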

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
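
/*
 * Usage sketch (hypothetical): CPU access to a natively allocated object.
 * For such objects the reservation lock must be held across the
 * vmap/vunmap pair, matching the dma_resv_assert_held() checks above:
 *
 *	struct iosys_map map;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (!ret) {
 *		iosys_map_memset(&map, 0, 0, shmem->base.size);
 *		drm_gem_shmem_vunmap(shmem, &map);
 *	}
 *	dma_resv_unlock(shmem->base.resv);
 */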

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/* Update madvise status, returns true if not purged, else
 * false.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);
	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge);

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
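
/*
 * Worked example: for an 800x600 XRGB8888 dumb buffer (bpp = 32) with
 * pitch and size left at zero by userspace, min_pitch =
 * DIV_ROUND_UP(800 * 32, 8) = 3200 bytes and size =
 * PAGE_ALIGN(3200 * 600) = PAGE_ALIGN(1920000) = 1921024 bytes with
 * 4 KiB pages.
 */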

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (shmem->base.import_attach)
		return;

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
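
/*
 * Usage sketch (hypothetical): obtaining DMA addresses for a natively
 * allocated buffer. The returned table is cached in and owned by the
 * object, so the caller must not free it:
 *
 *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	struct scatterlist *sgl;
 *	unsigned int i;
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	for_each_sgtable_dma_sg(sgt, sgl, i)
 *		... program sg_dma_address(sgl) / sg_dma_len(sgl) into the hw ...
 */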

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL v2");