1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2018 Noralf Trønnes
4  */
5 
6 #include <linux/dma-buf.h>
7 #include <linux/export.h>
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/shmem_fs.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13 
14 #ifdef CONFIG_X86
15 #include <asm/set_memory.h>
16 #endif
17 
18 #include <kunit/visibility.h>
19 
20 #include <drm/drm.h>
21 #include <drm/drm_device.h>
22 #include <drm/drm_drv.h>
23 #include <drm/drm_dumb_buffers.h>
24 #include <drm/drm_gem_shmem_helper.h>
25 #include <drm/drm_prime.h>
26 #include <drm/drm_print.h>
27 
28 MODULE_IMPORT_NS("DMA_BUF");
29 
30 /**
31  * DOC: overview
32  *
33  * This library provides helpers for GEM objects backed by shmem buffers
34  * allocated using anonymous pageable memory.
35  *
36  * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
37  * For GEM callback helpers in struct &drm_gem_object functions, see likewise
38  * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
39  * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
40  */
41 
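/*
 * A minimal hookup sketch (illustrative only, not part of this library): a
 * driver that is happy with the shmem defaults points its &drm_driver at the
 * helpers via the DRM_GEM_SHMEM_DRIVER_OPS convenience macro from
 * <drm/drm_gem_shmem_helper.h> and lets drm_gem_shmem_funcs below provide the
 * per-object callbacks. The driver name and feature flags are hypothetical:
 *
 *	static const struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.name		 = "my-shmem-driver",
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */
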
42 static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
43 	.free = drm_gem_shmem_object_free,
44 	.print_info = drm_gem_shmem_object_print_info,
45 	.pin = drm_gem_shmem_object_pin,
46 	.unpin = drm_gem_shmem_object_unpin,
47 	.get_sg_table = drm_gem_shmem_object_get_sg_table,
48 	.vmap = drm_gem_shmem_object_vmap,
49 	.vunmap = drm_gem_shmem_object_vunmap,
50 	.mmap = drm_gem_shmem_object_mmap,
51 	.vm_ops = &drm_gem_shmem_vm_ops,
52 };
53 
54 static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
55 				size_t size, bool private)
56 {
57 	struct drm_gem_object *obj = &shmem->base;
58 	int ret = 0;
59 
60 	if (!obj->funcs)
61 		obj->funcs = &drm_gem_shmem_funcs;
62 
63 	if (private) {
64 		drm_gem_private_object_init(dev, obj, size);
65 		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
66 	} else {
67 		ret = drm_gem_object_init(dev, obj, size);
68 	}
69 	if (ret) {
70 		drm_gem_private_object_fini(obj);
71 		return ret;
72 	}
73 
74 	ret = drm_gem_create_mmap_offset(obj);
75 	if (ret)
76 		goto err_release;
77 
78 	INIT_LIST_HEAD(&shmem->madv_list);
79 
80 	if (!private) {
81 		/*
82 		 * Our buffers are kept pinned, so allocating them
83 		 * from the MOVABLE zone is a really bad idea, and
84 		 * conflicts with CMA. See comments above new_inode()
85 		 * why this is required _and_ expected if you're
86 		 * going to pin these pages.
87 		 */
88 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
89 				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
90 	}
91 
92 	return 0;
93 err_release:
94 	drm_gem_object_release(obj);
95 	return ret;
96 }
97 
98 /**
99  * drm_gem_shmem_init - Initialize an allocated object.
100  * @dev: DRM device
101  * @shmem: shmem GEM object to initialize
102  * @size: Buffer size in bytes
103  *
104  * This function initializes an allocated shmem GEM object.
105  *
106  * Returns:
107  * 0 on success, or a negative error code on failure.
108  */
109 int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
110 {
111 	return __drm_gem_shmem_init(dev, shmem, size, false);
112 }
113 EXPORT_SYMBOL_GPL(drm_gem_shmem_init);
114 
115 static struct drm_gem_shmem_object *
116 __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
117 {
118 	struct drm_gem_shmem_object *shmem;
119 	struct drm_gem_object *obj;
120 	int ret = 0;
121 
122 	size = PAGE_ALIGN(size);
123 
124 	if (dev->driver->gem_create_object) {
125 		obj = dev->driver->gem_create_object(dev, size);
126 		if (IS_ERR(obj))
127 			return ERR_CAST(obj);
128 		shmem = to_drm_gem_shmem_obj(obj);
129 	} else {
130 		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
131 		if (!shmem)
132 			return ERR_PTR(-ENOMEM);
133 		obj = &shmem->base;
134 	}
135 
136 	ret = __drm_gem_shmem_init(dev, shmem, size, private);
137 	if (ret) {
138 		kfree(obj);
139 		return ERR_PTR(ret);
140 	}
141 
142 	return shmem;
143 }
144 /**
145  * drm_gem_shmem_create - Allocate an object with the given size
146  * @dev: DRM device
147  * @size: Size of the object to allocate
148  *
149  * This function creates a shmem GEM object.
150  *
151  * Returns:
152  * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
153  * error code on failure.
154  */
155 struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
156 {
157 	return __drm_gem_shmem_create(dev, size, false);
158 }
159 EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
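
/*
 * Example (sketch): allocate a shmem-backed object and drop the allocation
 * reference once a GEM handle owns it; this mirrors the static
 * drm_gem_shmem_create_with_handle() helper further down in this file:
 *
 *	struct drm_gem_shmem_object *shmem;
 *	u32 handle;
 *	int ret;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	ret = drm_gem_handle_create(file_priv, &shmem->base, &handle);
 *	drm_gem_object_put(&shmem->base);
 *	return ret;
 */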
160 
161 /**
162  * drm_gem_shmem_release - Release resources associated with a shmem GEM object.
163  * @shmem: shmem GEM object
164  *
165  * This function cleans up the GEM object state, but does not free the memory used to store the
166  * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
167  */
168 void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
169 {
170 	struct drm_gem_object *obj = &shmem->base;
171 
172 	if (drm_gem_is_imported(obj)) {
173 		drm_prime_gem_destroy(obj, shmem->sgt);
174 	} else {
175 		dma_resv_lock(shmem->base.resv, NULL);
176 
177 		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
178 
179 		if (shmem->sgt) {
180 			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
181 					  DMA_BIDIRECTIONAL, 0);
182 			sg_free_table(shmem->sgt);
183 			kfree(shmem->sgt);
184 		}
185 		if (shmem->pages)
186 			drm_gem_shmem_put_pages_locked(shmem);
187 
188 		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
189 		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
190 
191 		dma_resv_unlock(shmem->base.resv);
192 	}
193 
194 	drm_gem_object_release(obj);
195 }
196 EXPORT_SYMBOL_GPL(drm_gem_shmem_release);
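
/*
 * Example (sketch): drm_gem_shmem_init() and drm_gem_shmem_release() pair up
 * for callers that manage the object's storage themselves, e.g. when struct
 * drm_gem_shmem_object is embedded in a larger, driver-specific structure.
 * struct my_bo and the surrounding paths are hypothetical:
 *
 *	struct my_bo {
 *		struct drm_gem_shmem_object shmem;
 *		... driver-private state ...
 *	};
 *
 *	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	if (!bo)
 *		return ERR_PTR(-ENOMEM);
 *
 *	ret = drm_gem_shmem_init(dev, &bo->shmem, size);
 *	if (ret) {
 *		kfree(bo);
 *		return ERR_PTR(ret);
 *	}
 *
 *	... and later, from the object's &drm_gem_object_funcs.free callback ...
 *
 *	drm_gem_shmem_release(&bo->shmem);
 *	kfree(bo);
 */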
197 
198 /**
199  * drm_gem_shmem_free - Free resources associated with a shmem GEM object
200  * @shmem: shmem GEM object to free
201  *
202  * This function cleans up the GEM object state and frees the memory used to
203  * store the object itself.
204  */
205 void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
206 {
207 	drm_gem_shmem_release(shmem);
208 	kfree(shmem);
209 }
210 EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
211 
212 static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
213 {
214 	struct drm_gem_object *obj = &shmem->base;
215 	struct page **pages;
216 
217 	dma_resv_assert_held(shmem->base.resv);
218 
219 	if (refcount_inc_not_zero(&shmem->pages_use_count))
220 		return 0;
221 
222 	pages = drm_gem_get_pages(obj);
223 	if (IS_ERR(pages)) {
224 		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
225 			    PTR_ERR(pages));
226 		return PTR_ERR(pages);
227 	}
228 
229 	/*
230 	 * TODO: Allocating WC pages which are correctly flushed is only
231 	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
232 	 * ttm_pool.c could use.
233 	 */
234 #ifdef CONFIG_X86
235 	if (shmem->map_wc)
236 		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
237 #endif
238 
239 	shmem->pages = pages;
240 
241 	refcount_set(&shmem->pages_use_count, 1);
242 
243 	return 0;
244 }
245 
246 /*
247  * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
248  * @shmem: shmem GEM object
249  *
250  * This function decreases the use count and puts the backing pages when use drops to zero.
251  */
252 void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
253 {
254 	struct drm_gem_object *obj = &shmem->base;
255 
256 	dma_resv_assert_held(shmem->base.resv);
257 
258 	if (refcount_dec_and_test(&shmem->pages_use_count)) {
259 #ifdef CONFIG_X86
260 		if (shmem->map_wc)
261 			set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
262 #endif
263 
264 		drm_gem_put_pages(obj, shmem->pages,
265 				  shmem->pages_mark_dirty_on_put,
266 				  shmem->pages_mark_accessed_on_put);
267 		shmem->pages = NULL;
268 		shmem->pages_mark_accessed_on_put = false;
269 		shmem->pages_mark_dirty_on_put = false;
270 	}
271 }
272 EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
273 
274 int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
275 {
276 	int ret;
277 
278 	dma_resv_assert_held(shmem->base.resv);
279 
280 	drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));
281 
282 	if (refcount_inc_not_zero(&shmem->pages_pin_count))
283 		return 0;
284 
285 	ret = drm_gem_shmem_get_pages_locked(shmem);
286 	if (!ret)
287 		refcount_set(&shmem->pages_pin_count, 1);
288 
289 	return ret;
290 }
291 EXPORT_SYMBOL(drm_gem_shmem_pin_locked);
292 
293 void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
294 {
295 	dma_resv_assert_held(shmem->base.resv);
296 
297 	if (refcount_dec_and_test(&shmem->pages_pin_count))
298 		drm_gem_shmem_put_pages_locked(shmem);
299 }
300 EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);
301 
302 /**
303  * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
304  * @shmem: shmem GEM object
305  *
306  * This function makes sure the backing pages are pinned in memory while the
307  * buffer is exported.
308  *
309  * Returns:
310  * 0 on success or a negative error code on failure.
311  */
312 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
313 {
314 	struct drm_gem_object *obj = &shmem->base;
315 	int ret;
316 
317 	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
318 
319 	if (refcount_inc_not_zero(&shmem->pages_pin_count))
320 		return 0;
321 
322 	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
323 	if (ret)
324 		return ret;
325 	ret = drm_gem_shmem_pin_locked(shmem);
326 	dma_resv_unlock(shmem->base.resv);
327 
328 	return ret;
329 }
330 EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);
331 
332 /**
333  * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
334  * @shmem: shmem GEM object
335  *
336  * This function removes the requirement that the backing pages are pinned in
337  * memory.
338  */
339 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
340 {
341 	struct drm_gem_object *obj = &shmem->base;
342 
343 	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
344 
345 	if (refcount_dec_not_one(&shmem->pages_pin_count))
346 		return;
347 
348 	dma_resv_lock(shmem->base.resv, NULL);
349 	drm_gem_shmem_unpin_locked(shmem);
350 	dma_resv_unlock(shmem->base.resv);
351 }
352 EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
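
/*
 * Example (sketch): a driver that needs a buffer to stay resident while the
 * hardware works on it can bracket the access with the unlocked pin helpers.
 * The submit/completion paths around this snippet are hypothetical:
 *
 *	int ret;
 *
 *	ret = drm_gem_shmem_pin(shmem);
 *	if (ret)
 *		return ret;
 *
 *	... program the hardware and wait for it to finish ...
 *
 *	drm_gem_shmem_unpin(shmem);
 */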
353 
354 /*
355  * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
356  * @shmem: shmem GEM object
357  * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
358  *       store.
359  *
360  * This function makes sure that a contiguous kernel virtual address mapping
361  * exists for the buffer backing the shmem GEM object. It hides the differences
362  * between dma-buf imported and natively allocated objects.
363  *
364  * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
365  *
366  * Returns:
367  * 0 on success or a negative error code on failure.
368  */
369 int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
370 			      struct iosys_map *map)
371 {
372 	struct drm_gem_object *obj = &shmem->base;
373 	int ret = 0;
374 
375 	dma_resv_assert_held(obj->resv);
376 
377 	if (drm_gem_is_imported(obj)) {
378 		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
379 	} else {
380 		pgprot_t prot = PAGE_KERNEL;
381 
382 		dma_resv_assert_held(shmem->base.resv);
383 
384 		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
385 			iosys_map_set_vaddr(map, shmem->vaddr);
386 			return 0;
387 		}
388 
389 		ret = drm_gem_shmem_pin_locked(shmem);
390 		if (ret)
391 			return ret;
392 
393 		if (shmem->map_wc)
394 			prot = pgprot_writecombine(prot);
395 		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
396 				    VM_MAP, prot);
397 		if (!shmem->vaddr) {
398 			ret = -ENOMEM;
399 		} else {
400 			iosys_map_set_vaddr(map, shmem->vaddr);
401 			refcount_set(&shmem->vmap_use_count, 1);
402 			shmem->pages_mark_accessed_on_put = true;
403 			shmem->pages_mark_dirty_on_put = true;
404 		}
405 	}
406 
407 	if (ret) {
408 		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
409 		goto err_put_pages;
410 	}
411 
412 	return 0;
413 
414 err_put_pages:
415 	if (!drm_gem_is_imported(obj))
416 		drm_gem_shmem_unpin_locked(shmem);
417 
418 	return ret;
419 }
420 EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);
421 
422 /*
423  * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
424  * @shmem: shmem GEM object
425  * @map: Kernel virtual address where the SHMEM GEM object was mapped
426  *
427  * This function cleans up a kernel virtual address mapping acquired by
428  * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
429  * drops to zero.
430  *
431  * This function hides the differences between dma-buf imported and natively
432  * allocated objects.
433  */
434 void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
435 				 struct iosys_map *map)
436 {
437 	struct drm_gem_object *obj = &shmem->base;
438 
439 	dma_resv_assert_held(obj->resv);
440 
441 	if (drm_gem_is_imported(obj)) {
442 		dma_buf_vunmap(obj->import_attach->dmabuf, map);
443 	} else {
444 		dma_resv_assert_held(shmem->base.resv);
445 
446 		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
447 			vunmap(shmem->vaddr);
448 			shmem->vaddr = NULL;
449 
450 			drm_gem_shmem_unpin_locked(shmem);
451 		}
452 	}
453 }
454 EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
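
/*
 * Example (sketch): a CPU access path that maps the buffer into the kernel,
 * copies data into it and unmaps it again, holding the reservation lock
 * around both calls. The src buffer and len are hypothetical:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap_locked(shmem, &map);
 *	if (!ret) {
 *		iosys_map_memcpy_to(&map, 0, src, len);
 *		drm_gem_shmem_vunmap_locked(shmem, &map);
 *	}
 *	dma_resv_unlock(shmem->base.resv);
 */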
455 
456 static int
457 drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
458 				 struct drm_device *dev, size_t size,
459 				 uint32_t *handle)
460 {
461 	struct drm_gem_shmem_object *shmem;
462 	int ret;
463 
464 	shmem = drm_gem_shmem_create(dev, size);
465 	if (IS_ERR(shmem))
466 		return PTR_ERR(shmem);
467 
468 	/*
469 	 * Allocate an id in the idr table where the object is registered;
470 	 * the handle returned to userspace holds that id.
471 	 */
472 	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
473 	/* drop reference from allocate - handle holds it now. */
474 	drm_gem_object_put(&shmem->base);
475 
476 	return ret;
477 }
478 
479 /* Update madvise status, returns true if not purged, else
480  * false.
481  */
482 int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
483 {
484 	dma_resv_assert_held(shmem->base.resv);
485 
486 	if (shmem->madv >= 0)
487 		shmem->madv = madv;
488 
489 	madv = shmem->madv;
490 
491 	return (madv >= 0);
492 }
493 EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);
494 
495 void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
496 {
497 	struct drm_gem_object *obj = &shmem->base;
498 	struct drm_device *dev = obj->dev;
499 
500 	dma_resv_assert_held(shmem->base.resv);
501 
502 	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
503 
504 	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
505 	sg_free_table(shmem->sgt);
506 	kfree(shmem->sgt);
507 	shmem->sgt = NULL;
508 
509 	drm_gem_shmem_put_pages_locked(shmem);
510 
511 	shmem->madv = -1;
512 
513 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
514 	drm_gem_free_mmap_offset(obj);
515 
516 	/* Our goal here is to return as much of the memory as
517 	 * possible back to the system, as we are called from the OOM path.
518 	 * To do this we must instruct the shmfs to drop all of its
519 	 * backing pages, *now*.
520 	 */
521 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
522 
523 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
524 }
525 EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
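
/*
 * Example (sketch): a driver shrinker walking its list of madvised objects
 * might reclaim purgeable entries like this. The list handling and the
 * trylock policy are hypothetical and driver specific:
 *
 *	if (!dma_resv_trylock(shmem->base.resv))
 *		continue;
 *
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge_locked(shmem);
 *
 *	dma_resv_unlock(shmem->base.resv);
 */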
526 
527 /**
528  * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
529  * @file: DRM file structure to create the dumb buffer for
530  * @dev: DRM device
531  * @args: IOCTL data
532  *
533  * This function computes the pitch of the dumb buffer and rounds it up to an
534  * integer number of bytes per pixel. Drivers for hardware that doesn't have
535  * any additional restrictions on the pitch can directly use this function as
536  * their &drm_driver.dumb_create callback.
537  *
538  * For hardware with additional restrictions, drivers can adjust the fields
539  * set up by userspace before calling into this function.
540  *
541  * Returns:
542  * 0 on success or a negative error code on failure.
543  */
544 int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
545 			      struct drm_mode_create_dumb *args)
546 {
547 	int ret;
548 
549 	ret = drm_mode_size_dumb(dev, args, 0, 0);
550 	if (ret)
551 		return ret;
552 
553 	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
554 }
555 EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
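
/*
 * Example (sketch): hardware with extra placement constraints can adjust the
 * userspace-provided fields before chaining to this helper, as described in
 * the kernel-doc above. my_dumb_create() and the alignment value are
 * hypothetical:
 *
 *	static int my_dumb_create(struct drm_file *file, struct drm_device *dev,
 *				  struct drm_mode_create_dumb *args)
 *	{
 *		args->width = ALIGN(args->width, 16);
 *		return drm_gem_shmem_dumb_create(file, dev, args);
 *	}
 */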
556 
557 static vm_fault_t drm_gem_shmem_try_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn)
558 {
559 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
560 	unsigned long paddr = pfn << PAGE_SHIFT;
561 	bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
562 
563 	if (aligned && pmd_none(*vmf->pmd)) {
564 		/* Read-only mapping; split upon write fault */
565 		pfn &= PMD_MASK >> PAGE_SHIFT;
566 		return vmf_insert_pfn_pmd(vmf, pfn, false);
567 	}
568 #endif
569 
570 	return 0;
571 }
572 
573 static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
574 {
575 	struct vm_area_struct *vma = vmf->vma;
576 	struct drm_gem_object *obj = vma->vm_private_data;
577 	struct drm_device *dev = obj->dev;
578 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
579 	loff_t num_pages = obj->size >> PAGE_SHIFT;
580 	vm_fault_t ret = VM_FAULT_SIGBUS;
581 	struct page **pages = shmem->pages;
582 	pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
583 	struct page *page;
584 	struct folio *folio;
585 	unsigned long pfn;
586 
587 	dma_resv_lock(obj->resv, NULL);
588 
589 	if (page_offset >= num_pages || drm_WARN_ON_ONCE(dev, !shmem->pages) ||
590 	    shmem->madv < 0)
591 		goto out;
592 
593 	page = pages[page_offset];
594 	if (drm_WARN_ON_ONCE(dev, !page))
595 		goto out;
596 	folio = page_folio(page);
597 
598 	pfn = page_to_pfn(page);
599 
600 	if (folio_test_pmd_mappable(folio))
601 		ret = drm_gem_shmem_try_insert_pfn_pmd(vmf, pfn);
602 	if (ret != VM_FAULT_NOPAGE)
603 		ret = vmf_insert_pfn(vma, vmf->address, pfn);
604 
605 	if (ret == VM_FAULT_NOPAGE)
606 		folio_mark_accessed(folio);
607 
608 out:
609 	dma_resv_unlock(obj->resv);
610 
611 	return ret;
612 }
613 
614 static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
615 {
616 	struct drm_gem_object *obj = vma->vm_private_data;
617 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
618 
619 	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
620 
621 	dma_resv_lock(shmem->base.resv, NULL);
622 
623 	/*
624 	 * We should have already pinned the pages when the buffer was first
625 	 * mmap'd, vm_open() just grabs an additional reference for the new
626 	 * mm the vma is getting copied into (ie. on fork()).
627 	 */
628 	drm_WARN_ON_ONCE(obj->dev,
629 			 !refcount_inc_not_zero(&shmem->pages_use_count));
630 
631 	dma_resv_unlock(shmem->base.resv);
632 
633 	drm_gem_vm_open(vma);
634 }
635 
636 static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
637 {
638 	struct drm_gem_object *obj = vma->vm_private_data;
639 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
640 
641 	dma_resv_lock(shmem->base.resv, NULL);
642 	drm_gem_shmem_put_pages_locked(shmem);
643 	dma_resv_unlock(shmem->base.resv);
644 
645 	drm_gem_vm_close(vma);
646 }
647 
648 static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
649 {
650 	struct vm_area_struct *vma = vmf->vma;
651 	struct drm_gem_object *obj = vma->vm_private_data;
652 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
653 	loff_t num_pages = obj->size >> PAGE_SHIFT;
654 	pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
655 
656 	if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
657 		return VM_FAULT_SIGBUS;
658 
659 	file_update_time(vma->vm_file);
660 
661 	folio_mark_dirty(page_folio(shmem->pages[page_offset]));
662 
663 	return 0;
664 }
665 
666 const struct vm_operations_struct drm_gem_shmem_vm_ops = {
667 	.fault = drm_gem_shmem_fault,
668 	.open = drm_gem_shmem_vm_open,
669 	.close = drm_gem_shmem_vm_close,
670 	.pfn_mkwrite = drm_gem_shmem_pfn_mkwrite,
671 };
672 EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
673 
674 /**
675  * drm_gem_shmem_mmap - Memory-map a shmem GEM object
676  * @shmem: shmem GEM object
677  * @vma: VMA for the area to be mapped
678  *
679  * This function implements an augmented version of the GEM DRM file mmap
680  * operation for shmem objects.
681  *
682  * Returns:
683  * 0 on success or a negative error code on failure.
684  */
685 int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
686 {
687 	struct drm_gem_object *obj = &shmem->base;
688 	int ret;
689 
690 	if (drm_gem_is_imported(obj)) {
691 		/* Reset both vm_ops and vm_private_data, so we don't end up with
692 		 * vm_ops pointing to our implementation if the dma-buf backend
693 		 * doesn't set those fields.
694 		 */
695 		vma->vm_private_data = NULL;
696 		vma->vm_ops = NULL;
697 
698 		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
699 
700 		/* Drop the reference drm_gem_mmap_obj() acquired. */
701 		if (!ret)
702 			drm_gem_object_put(obj);
703 
704 		return ret;
705 	}
706 
707 	if (is_cow_mapping(vma->vm_flags))
708 		return -EINVAL;
709 
710 	dma_resv_lock(shmem->base.resv, NULL);
711 	ret = drm_gem_shmem_get_pages_locked(shmem);
712 	dma_resv_unlock(shmem->base.resv);
713 
714 	if (ret)
715 		return ret;
716 
717 	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
718 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
719 	if (shmem->map_wc)
720 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
721 
722 	return 0;
723 }
724 EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
725 
726 /**
727  * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
728  * @shmem: shmem GEM object
729  * @p: DRM printer
730  * @indent: Tab indentation level
731  */
732 void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
733 			      struct drm_printer *p, unsigned int indent)
734 {
735 	if (drm_gem_is_imported(&shmem->base))
736 		return;
737 
738 	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
739 	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
740 	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
741 	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
742 }
743 EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
744 
745 /**
746  * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
747  *                              pages for a shmem GEM object
748  * @shmem: shmem GEM object
749  *
750  * This function exports a scatter/gather table suitable for PRIME usage by
751  * calling the standard DMA mapping API.
752  *
753  * Drivers that need to acquire a scatter/gather table for objects should call
754  * drm_gem_shmem_get_pages_sgt() instead.
755  *
756  * Returns:
757  * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
758  */
759 struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
760 {
761 	struct drm_gem_object *obj = &shmem->base;
762 
763 	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
764 
765 	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
766 }
767 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
768 
769 static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
770 {
771 	struct drm_gem_object *obj = &shmem->base;
772 	int ret;
773 	struct sg_table *sgt;
774 
775 	if (shmem->sgt)
776 		return shmem->sgt;
777 
778 	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
779 
780 	ret = drm_gem_shmem_get_pages_locked(shmem);
781 	if (ret)
782 		return ERR_PTR(ret);
783 
784 	sgt = drm_gem_shmem_get_sg_table(shmem);
785 	if (IS_ERR(sgt)) {
786 		ret = PTR_ERR(sgt);
787 		goto err_put_pages;
788 	}
789 	/* Map the pages for use by the h/w. */
790 	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
791 	if (ret)
792 		goto err_free_sgt;
793 
794 	shmem->sgt = sgt;
795 
796 	return sgt;
797 
798 err_free_sgt:
799 	sg_free_table(sgt);
800 	kfree(sgt);
801 err_put_pages:
802 	drm_gem_shmem_put_pages_locked(shmem);
803 	return ERR_PTR(ret);
804 }
805 
806 /**
807  * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
808  *				 scatter/gather table for a shmem GEM object.
809  * @shmem: shmem GEM object
810  *
811  * This function returns a scatter/gather table suitable for driver usage. If
812  * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
813  * table created.
814  *
815  * This is the main function for drivers to get at backing storage, and it hides
816  * any differences between dma-buf imported and natively allocated objects.
817  * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
818  *
819  * Returns:
820  * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
821  */
822 struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
823 {
824 	int ret;
825 	struct sg_table *sgt;
826 
827 	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
828 	if (ret)
829 		return ERR_PTR(ret);
830 	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
831 	dma_resv_unlock(shmem->base.resv);
832 
833 	return sgt;
834 }
835 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
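
/*
 * Example (sketch): fetch the dma-mapped scatterlist and feed each entry to
 * the device, e.g. when building the device's page tables. The
 * my_hw_write_pte() callback is hypothetical:
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		my_hw_write_pte(sg_dma_address(sg), sg_dma_len(sg));
 */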
836 
837 /**
838  * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
839  *                 another driver's scatter/gather table of pinned pages
840  * @dev: Device to import into
841  * @attach: DMA-BUF attachment
842  * @sgt: Scatter/gather table of pinned pages
843  *
844  * This function imports a scatter/gather table exported via DMA-BUF by
845  * another driver. Drivers that use the shmem helpers should set this as their
846  * &drm_driver.gem_prime_import_sg_table callback.
847  *
848  * Returns:
849  * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
850  * error code on failure.
851  */
852 struct drm_gem_object *
853 drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
854 				    struct dma_buf_attachment *attach,
855 				    struct sg_table *sgt)
856 {
857 	size_t size = PAGE_ALIGN(attach->dmabuf->size);
858 	struct drm_gem_shmem_object *shmem;
859 
860 	shmem = __drm_gem_shmem_create(dev, size, true);
861 	if (IS_ERR(shmem))
862 		return ERR_CAST(shmem);
863 
864 	shmem->sgt = sgt;
865 
866 	drm_dbg_prime(dev, "size = %zu\n", size);
867 
868 	return &shmem->base;
869 }
870 EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
871 
872 /**
873  * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
874  * @dev: Device to import into
875  * @dma_buf: dma-buf object to import
876  *
877  * Drivers that use the shmem helpers but also want to import dmabuf without
878  * mapping its sg_table can use this as their &drm_driver.gem_prime_import
879  * implementation.
880  */
881 struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
882 							 struct dma_buf *dma_buf)
883 {
884 	struct dma_buf_attachment *attach;
885 	struct drm_gem_shmem_object *shmem;
886 	struct drm_gem_object *obj;
887 	size_t size;
888 	int ret;
889 
890 	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
891 		/*
892 		 * Importing dmabuf exported from our own gem increases
893 		 * refcount on gem itself instead of f_count of dmabuf.
894 		 */
895 		obj = dma_buf->priv;
896 		drm_gem_object_get(obj);
897 		return obj;
898 	}
899 
900 	attach = dma_buf_attach(dma_buf, dev->dev);
901 	if (IS_ERR(attach))
902 		return ERR_CAST(attach);
903 
904 	get_dma_buf(dma_buf);
905 
906 	size = PAGE_ALIGN(attach->dmabuf->size);
907 
908 	shmem = __drm_gem_shmem_create(dev, size, true);
909 	if (IS_ERR(shmem)) {
910 		ret = PTR_ERR(shmem);
911 		goto fail_detach;
912 	}
913 
914 	drm_dbg_prime(dev, "size = %zu\n", size);
915 
916 	shmem->base.import_attach = attach;
917 	shmem->base.resv = dma_buf->resv;
918 
919 	return &shmem->base;
920 
921 fail_detach:
922 	dma_buf_detach(dma_buf, attach);
923 	dma_buf_put(dma_buf);
924 
925 	return ERR_PTR(ret);
926 }
927 EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
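
/*
 * Example (sketch): a driver that accesses imported buffers through means
 * other than a CPU or device mapping of the sg_table (for instance via its
 * own MMU management) can select this import path in its &drm_driver:
 *
 *	static const struct drm_driver my_driver = {
 *		...
 *		.gem_prime_import = drm_gem_shmem_prime_import_no_map,
 *	};
 */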
928 
929 /*
930  * Kunit helpers
931  */
932 
933 #if IS_ENABLED(CONFIG_KUNIT)
934 int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
935 {
936 	struct drm_gem_object *obj = &shmem->base;
937 	int ret;
938 
939 	ret = dma_resv_lock_interruptible(obj->resv, NULL);
940 	if (ret)
941 		return ret;
942 	ret = drm_gem_shmem_vmap_locked(shmem, map);
943 	dma_resv_unlock(obj->resv);
944 
945 	return ret;
946 }
947 EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vmap);
948 
949 void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
950 {
951 	struct drm_gem_object *obj = &shmem->base;
952 
953 	dma_resv_lock(obj->resv, NULL);
954 	drm_gem_shmem_vunmap_locked(shmem, map);
955 	dma_resv_unlock(obj->resv);
956 }
957 EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vunmap);
958 
959 int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
960 {
961 	struct drm_gem_object *obj = &shmem->base;
962 	int ret;
963 
964 	ret = dma_resv_lock_interruptible(obj->resv, NULL);
965 	if (ret)
966 		return ret;
967 	ret = drm_gem_shmem_madvise_locked(shmem, madv);
968 	dma_resv_unlock(obj->resv);
969 
970 	return ret;
971 }
972 EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_madvise);
973 
974 int drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
975 {
976 	struct drm_gem_object *obj = &shmem->base;
977 	int ret;
978 
979 	ret = dma_resv_lock_interruptible(obj->resv, NULL);
980 	if (ret)
981 		return ret;
982 	drm_gem_shmem_purge_locked(shmem);
983 	dma_resv_unlock(obj->resv);
984 
985 	return 0;
986 }
987 EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_purge);
988 #endif
989 
990 MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
992 MODULE_LICENSE("GPL");
993