// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <kunit/visibility.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS("DMA_BUF");

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
				size_t size, bool private)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		return ret;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return 0;
err_release:
	drm_gem_object_release(obj);
	return ret;
}

/**
 * drm_gem_shmem_init - Initialize an allocated object
 * @dev: DRM device
 * @shmem: shmem GEM object to initialize
 * @size: Buffer size in bytes
 *
 * This function initializes an allocated shmem GEM object.
 *
 * Returns:
 * 0 on success, or a negative error code on failure.
 */
int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
{
	return __drm_gem_shmem_init(dev, shmem, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_init);

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	ret = __drm_gem_shmem_init(dev, shmem, size, private);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return shmem;
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
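
/*
 * A minimal usage sketch, not taken from any specific driver: allocate a
 * shmem-backed object, use it through its embedded &drm_gem_object, and
 * drop the reference when done. The surrounding error handling is the
 * caller's responsibility.
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	... use shmem->base with the usual GEM interfaces ...
 *
 *	drm_gem_object_put(&shmem->base);
 */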

/**
 * drm_gem_shmem_release - Release resources associated with a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function cleans up the GEM object state, but does not free the memory used to store the
 * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
 */
void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (drm_gem_is_imported(obj)) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages_locked(shmem);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_release);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	drm_gem_shmem_release(shmem);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_inc_not_zero(&shmem->pages_use_count))
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	refcount_set(&shmem->pages_use_count, 1);

	return 0;
}

/*
 * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * count drops to zero.
 */
void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
		if (shmem->map_wc)
			set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

		drm_gem_put_pages(obj, shmem->pages,
				  shmem->pages_mark_dirty_on_put,
				  shmem->pages_mark_accessed_on_put);
		shmem->pages = NULL;
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (!ret)
		refcount_set(&shmem->pages_pin_count, 1);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_pin_count))
		drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_dec_not_one(&shmem->pages_pin_count))
		return;

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
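
/*
 * Pin/unpin usage sketch (illustrative only): keep the backing pages
 * resident for as long as the hardware may access them.
 *
 *	int ret;
 *
 *	ret = drm_gem_shmem_pin(shmem);
 *	if (ret)
 *		return ret;
 *
 *	... program the device with the object's backing pages ...
 *
 *	drm_gem_shmem_unpin(shmem);
 */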

/*
 * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
			      struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_pin_locked(shmem);
		if (ret)
			return ret;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr) {
			ret = -ENOMEM;
		} else {
			iosys_map_set_vaddr(map, shmem->vaddr);
			refcount_set(&shmem->vmap_use_count, 1);
		}
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!drm_gem_is_imported(obj))
		drm_gem_shmem_unpin_locked(shmem);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);

/*
 * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
 * drops to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
				 struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
			vunmap(shmem->vaddr);
			shmem->vaddr = NULL;

			drm_gem_shmem_unpin_locked(shmem);
		}
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
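
/*
 * CPU-mapping sketch, assuming the caller does not already hold the
 * reservation lock: both helpers require shmem->base.resv to be held.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap_locked(shmem, &map);
 *	dma_resv_unlock(shmem->base.resv);
 *	if (ret)
 *		return ret;
 *
 *	... CPU access through map.vaddr ...
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	drm_gem_shmem_vunmap_locked(shmem, &map);
 *	dma_resv_unlock(shmem->base.resv);
 */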

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an ID for the object in the IDR table; the handle is
	 * the ID that userspace gets to see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/*
 * Update madvise status; returns true if the object has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much memory as possible back to the
	 * system, as we are called from the OOM path. To do this we must
	 * instruct shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
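
/*
 * Madvise/purge sketch: a driver's shrinker might walk its madv_list and
 * purge objects that userspace marked DONTNEED (madv > 0). This is only an
 * outline under those assumptions; drm_gem_shmem_is_purgeable() performs
 * the actual eligibility check.
 *
 *	if (dma_resv_trylock(shmem->base.resv)) {
 *		if (drm_gem_shmem_is_purgeable(shmem))
 *			drm_gem_shmem_purge_locked(shmem);
 *		dma_resv_unlock(shmem->base.resv);
 *	}
 */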

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	int ret;

	ret = drm_mode_size_dumb(dev, args, 0, 0);
	if (ret)
		return ret;

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
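
/*
 * Wiring sketch: drivers without extra pitch constraints can plug this in
 * directly, typically via the DRM_GEM_SHMEM_DRIVER_OPS convenience macro
 * from <drm/drm_gem_shmem_helper.h>. "mydrv" is a placeholder name:
 *
 *	static const struct drm_driver mydrv_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *		...
 *	};
 */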

static vm_fault_t try_insert_pfn(struct vm_fault *vmf, unsigned int order,
				 unsigned long pfn)
{
	if (!order) {
		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
	} else if (order == PMD_ORDER) {
		unsigned long paddr = pfn << PAGE_SHIFT;
		bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);

		if (aligned &&
		    folio_test_pmd_mappable(page_folio(pfn_to_page(pfn)))) {
			pfn &= PMD_MASK >> PAGE_SHIFT;
			return vmf_insert_pfn_pmd(vmf, pfn, false);
		}
#endif
	}
	return VM_FAULT_FALLBACK;
}

static vm_fault_t drm_gem_shmem_any_fault(struct vm_fault *vmf, unsigned int order)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page **pages = shmem->pages;
	pgoff_t page_offset;
	unsigned long pfn;

	if (order && order != PMD_ORDER)
		return VM_FAULT_FALLBACK;

	/* Offset to the faulting address within the VMA. */
	page_offset = vmf->pgoff - vma->vm_pgoff;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	pfn = page_to_pfn(pages[page_offset]);
	ret = try_insert_pfn(vmf, order, pfn);

out:
	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	return drm_gem_shmem_any_fault(vmf, 0);
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	drm_WARN_ON_ONCE(obj->dev,
			 !refcount_inc_not_zero(&shmem->pages_use_count));

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
	.huge_fault = drm_gem_shmem_any_fault,
#endif
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (drm_gem_is_imported(obj)) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
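
/*
 * The mmap path is normally reached through the DRM file operations, e.g.
 * (a sketch; the fops name is a placeholder):
 *
 *	DEFINE_DRM_GEM_FOPS(mydrv_fops);
 *
 * drm_gem_mmap() then dispatches to drm_gem_shmem_object_mmap() via the
 * object's &drm_gem_object_funcs.mmap callback.
 */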

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (drm_gem_is_imported(&shmem->base))
		return;

	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages_locked(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table is created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be called directly by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
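
/*
 * Sketch of typical driver usage (illustrative only): fetch the table once
 * and walk the DMA-mapped entries when programming the device.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		... program sg_dma_address(sg) / sg_dma_len(sg) ...
 */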

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

/**
 * drm_gem_shmem_prime_import_no_map - Import a dmabuf without mapping its sg_table
 * @dev: Device to import into
 * @dma_buf: dma-buf object to import
 *
 * Drivers that use the shmem helpers but also want to import a dmabuf without
 * mapping its sg_table can use this as their &drm_driver.gem_prime_import
 * implementation.
 */
struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
							 struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	size_t size;
	int ret;

	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
		/*
		 * Importing a dmabuf exported from our own GEM increases
		 * the refcount on the GEM object itself instead of the
		 * f_count of the dmabuf.
		 */
		obj = dma_buf->priv;
		drm_gem_object_get(obj);
		return obj;
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	size = PAGE_ALIGN(attach->dmabuf->size);

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem)) {
		ret = PTR_ERR(shmem);
		goto fail_detach;
	}

	drm_dbg_prime(dev, "size = %zu\n", size);

	shmem->base.import_attach = attach;
	shmem->base.resv = dma_buf->resv;

	return &shmem->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
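
/*
 * Wiring sketch ("mydrv" is a placeholder): drivers that must not map the
 * imported buffer set this as their PRIME import hook.
 *
 *	static const struct drm_driver mydrv_driver = {
 *		.gem_prime_import = drm_gem_shmem_prime_import_no_map,
 *		...
 *	};
 */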

/*
 * KUnit helpers
 */

#if IS_ENABLED(CONFIG_KUNIT)
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vmap);

void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_lock(obj->resv, NULL);
	drm_gem_shmem_vunmap_locked(shmem, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vunmap);

int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_madvise_locked(shmem, madv);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_madvise);

int drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (ret)
		return ret;
	drm_gem_shmem_purge_locked(shmem);
	dma_resv_unlock(obj->resv);

	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_purge);
#endif

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL");