// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS("DMA_BUF");

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};
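
/*
 * Example (illustrative sketch, not part of this file): a driver that embeds
 * struct drm_gem_shmem_object in its own buffer object can reuse the helper
 * callbacks through the _object_ wrappers and only override what it needs.
 * The foo_* names are hypothetical.
 *
 *	struct foo_bo {
 *		struct drm_gem_shmem_object base;
 *	};
 *
 *	static void foo_gem_free(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = container_of(to_drm_gem_shmem_obj(obj),
 *						 struct foo_bo, base);
 *
 *		// driver-specific teardown would go here
 *		drm_gem_shmem_free(&bo->base);
 *	}
 *
 *	static const struct drm_gem_object_funcs foo_gem_funcs = {
 *		.free = foo_gem_free,
 *		.print_info = drm_gem_shmem_object_print_info,
 *		.pin = drm_gem_shmem_object_pin,
 *		.unpin = drm_gem_shmem_object_unpin,
 *		.get_sg_table = drm_gem_shmem_object_get_sg_table,
 *		.vmap = drm_gem_shmem_object_vmap,
 *		.vunmap = drm_gem_shmem_object_vunmap,
 *		.mmap = drm_gem_shmem_object_mmap,
 *		.vm_ops = &drm_gem_shmem_vm_ops,
 *	};
 */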

static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
				size_t size, bool private, struct vfsmount *gemfs)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		return ret;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return 0;
err_release:
	drm_gem_object_release(obj);
	return ret;
}

/**
 * drm_gem_shmem_init - Initialize an allocated object
 * @dev: DRM device
 * @shmem: The allocated shmem GEM object
 * @size: Size of the object to allocate
 *
 * Returns:
 * 0 on success, or a negative error code on failure.
 */
int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
{
	return __drm_gem_shmem_init(dev, shmem, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_init);
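
/*
 * Example (illustrative sketch, not part of this file): drm_gem_shmem_init()
 * is intended for objects whose storage already exists, e.g. a shmem object
 * embedded in a larger driver structure. The foo_* names are hypothetical;
 * note that GEM object sizes must be page-aligned.
 *
 *	struct foo_bo {
 *		struct drm_gem_shmem_object base;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_shmem_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return bo;
 *	}
 */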

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
		       struct vfsmount *gemfs)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return shmem;
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
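
/*
 * Example (illustrative sketch): typical allocation and release. The shmem
 * object is reference counted through its embedded GEM object, so callers
 * normally drop their reference with drm_gem_object_put() and let the
 * object's .free callback run drm_gem_shmem_free().
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	// ... use shmem->base with the usual GEM APIs ...
 *
 *	drm_gem_object_put(&shmem->base);
 */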

/**
 * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
 * given mountpoint
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @gemfs: tmpfs mount where the GEM object will be created
 *
 * This function creates a shmem GEM object in a given tmpfs mountpoint.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
							   size_t size,
							   struct vfsmount *gemfs)
{
	return __drm_gem_shmem_create(dev, size, false, gemfs);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);

/**
 * drm_gem_shmem_release - Release resources associated with a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function cleans up the GEM object state, but does not free the memory used to store the
 * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
 */
void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (drm_gem_is_imported(obj)) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages_locked(shmem);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_release);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	drm_gem_shmem_release(shmem);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_inc_not_zero(&shmem->pages_use_count))
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	refcount_set(&shmem->pages_use_count, 1);

	return 0;
}

/*
 * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the use count drops to zero.
 */
void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
		if (shmem->map_wc)
			set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

		drm_gem_put_pages(obj, shmem->pages,
				  shmem->pages_mark_dirty_on_put,
				  shmem->pages_mark_accessed_on_put);
		shmem->pages = NULL;
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (!ret)
		refcount_set(&shmem->pages_pin_count, 1);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_pin_count))
		drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_dec_not_one(&shmem->pages_pin_count))
		return;

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
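
/*
 * Example (illustrative sketch): pinning the backing pages around a device
 * operation so they cannot be released while the hardware uses them.
 *
 *	int ret;
 *
 *	ret = drm_gem_shmem_pin(shmem);
 *	if (ret)
 *		return ret;
 *
 *	// ... hand the object's pages to the hardware ...
 *
 *	drm_gem_shmem_unpin(shmem);
 */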

/*
 * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
			      struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_pin_locked(shmem);
		if (ret)
			return ret;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr) {
			ret = -ENOMEM;
		} else {
			iosys_map_set_vaddr(map, shmem->vaddr);
			refcount_set(&shmem->vmap_use_count, 1);
		}
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!drm_gem_is_imported(obj))
		drm_gem_shmem_unpin_locked(shmem);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);

/*
 * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
 * drops to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
				 struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
			vunmap(shmem->vaddr);
			shmem->vaddr = NULL;

			drm_gem_shmem_unpin_locked(shmem);
		}
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
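
/*
 * Example (illustrative sketch): creating and destroying a kernel mapping.
 * The _locked variants require the caller to hold the object's reservation
 * lock across both calls and any CPU access.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *
 *	ret = drm_gem_shmem_vmap_locked(shmem, &map);
 *	if (!ret) {
 *		// ... access the buffer through map.vaddr ...
 *		drm_gem_shmem_vunmap_locked(shmem, &map);
 *	}
 *
 *	dma_resv_unlock(shmem->base.resv);
 */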

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an ID from the IDR table where the object is registered;
	 * the returned handle holds the ID that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/* Update madvise status, returns true if the buffer has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
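
/*
 * Example (illustrative sketch): how a driver's memory shrinker might use
 * the madvise state to reclaim buffers that userspace marked as unneeded.
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge_locked(shmem);
 *
 *	dma_resv_unlock(shmem->base.resv);
 */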

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	int ret;

	ret = drm_mode_size_dumb(dev, args, SZ_8, 0);
	if (ret)
		return ret;

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
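
/*
 * Example (illustrative sketch): using the helper as the driver's
 * &drm_driver.dumb_create callback. The foo_driver structure is hypothetical;
 * many drivers pick this up via the DRM_GEM_SHMEM_DRIVER_OPS convenience
 * macro instead of setting it by hand.
 *
 *	static const struct drm_driver foo_driver = {
 *		// other callbacks elided
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *	};
 */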

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	drm_WARN_ON_ONCE(obj->dev,
			 !refcount_inc_not_zero(&shmem->pages_use_count));

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (drm_gem_is_imported(obj)) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (drm_gem_is_imported(&shmem->base))
		return;

	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages_locked(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table is created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be called directly by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
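
/*
 * Example (illustrative sketch): fetching the sg table and walking the
 * DMA-mapped segments, e.g. to program a device MMU.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i) {
 *		dma_addr_t addr = sg_dma_address(sg);
 *		unsigned int len = sg_dma_len(sg);
 *
 *		// ... map [addr, addr + len) for the device ...
 *	}
 */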

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

/**
 * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
 * @dev: Device to import into
 * @dma_buf: dma-buf object to import
 *
 * Drivers that use the shmem helpers but also want to import dmabuf without
 * mapping its sg_table can use this as their &drm_driver.gem_prime_import
 * implementation.
 */
struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
							 struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	size_t size;
	int ret;

	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
		/*
		 * Importing a dmabuf exported from our own gem increases
		 * the refcount on the gem itself instead of the f_count of
		 * the dmabuf.
		 */
		obj = dma_buf->priv;
		drm_gem_object_get(obj);
		return obj;
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	size = PAGE_ALIGN(attach->dmabuf->size);

	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
	if (IS_ERR(shmem)) {
		ret = PTR_ERR(shmem);
		goto fail_detach;
	}

	drm_dbg_prime(dev, "size = %zu\n", size);

	shmem->base.import_attach = attach;
	shmem->base.resv = dma_buf->resv;

	return &shmem->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
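
/*
 * Example (illustrative sketch): wiring the no-map import path into a driver.
 * The foo_driver structure is hypothetical; drivers that do need the pages
 * dma-mapped at import time set &drm_driver.gem_prime_import_sg_table to
 * drm_gem_shmem_prime_import_sg_table() instead.
 *
 *	static const struct drm_driver foo_driver = {
 *		// other callbacks elided
 *		.gem_prime_import = drm_gem_shmem_prime_import_no_map,
 *	};
 */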

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");