// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */

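/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * driver built on these helpers typically wires up the shmem entry points
 * in its &drm_driver via the DRM_GEM_SHMEM_DRIVER_OPS convenience macro
 * from <drm/drm_gem_shmem_helper.h>; the "mydrv" name is hypothetical.
 *
 *	static const struct drm_driver mydrv_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */
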
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
				size_t size, bool private, struct vfsmount *gemfs)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		return ret;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See the comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return 0;
err_release:
	drm_gem_object_release(obj);
	return ret;
}

/**
 * drm_gem_shmem_init - Initialize an allocated object.
 * @dev: DRM device
 * @shmem: The allocated shmem GEM object.
 * @size: Size of the object, in bytes.
 *
 * Returns:
 * 0 on success, or a negative error code on failure.
 */
int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
{
	return __drm_gem_shmem_init(dev, shmem, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_init);

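/*
 * Illustrative sketch (the "mydrv_bo" wrapper is hypothetical): drivers
 * that embed struct drm_gem_shmem_object in a larger object can allocate
 * the wrapper themselves and then initialize the shmem part.
 *
 *	struct mydrv_bo {
 *		struct drm_gem_shmem_object shmem;
 *		// driver-private state follows
 *	};
 *
 *	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	if (!bo)
 *		return ERR_PTR(-ENOMEM);
 *
 *	ret = drm_gem_shmem_init(dev, &bo->shmem, size);
 *	if (ret) {
 *		kfree(bo);
 *		return ERR_PTR(ret);
 *	}
 */
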
static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
		       struct vfsmount *gemfs)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return shmem;
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

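/*
 * Illustrative sketch: allocate a shmem object and publish a handle for
 * it; the error handling mirrors what drm_gem_shmem_create_with_handle()
 * further down in this file does.
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	ret = drm_gem_handle_create(file_priv, &shmem->base, &handle);
 *	drm_gem_object_put(&shmem->base);
 */
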
/**
 * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
 * given mountpoint
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @gemfs: tmpfs mount where the GEM object will be created
 *
 * This function creates a shmem GEM object in a given tmpfs mountpoint.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
							   size_t size,
							   struct vfsmount *gemfs)
{
	return __drm_gem_shmem_create(dev, size, false, gemfs);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);

/**
 * drm_gem_shmem_release - Release resources associated with a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function cleans up the GEM object state, but does not free the memory used to store the
 * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
 */
void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (drm_gem_is_imported(obj)) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages_locked(shmem);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_release);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	drm_gem_shmem_release(shmem);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_inc_not_zero(&shmem->pages_use_count))
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. The ideal solution would be a GFP_WC flag,
	 * which ttm_pool.c could also use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	refcount_set(&shmem->pages_use_count, 1);

	return 0;
}

/*
 * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * count drops to zero.
 */
void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
		if (shmem->map_wc)
			set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

		drm_gem_put_pages(obj, shmem->pages,
				  shmem->pages_mark_dirty_on_put,
				  shmem->pages_mark_accessed_on_put);
		shmem->pages = NULL;
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (!ret)
		refcount_set(&shmem->pages_pin_count, 1);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_pin_count))
		drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_dec_not_one(&shmem->pages_pin_count))
		return;

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);

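/*
 * Illustrative sketch (the "bo" wrapper around a shmem GEM object is
 * hypothetical): keep the backing pages resident across a stretch of
 * hardware access by bracketing it with the pin/unpin pair above.
 *
 *	ret = drm_gem_shmem_pin(&bo->shmem);
 *	if (ret)
 *		return ret;
 *
 *	// hand bo's pages to the hardware
 *
 *	drm_gem_shmem_unpin(&bo->shmem);
 */
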
/*
 * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
			      struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_pin_locked(shmem);
		if (ret)
			return ret;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr) {
			ret = -ENOMEM;
		} else {
			iosys_map_set_vaddr(map, shmem->vaddr);
			refcount_set(&shmem->vmap_use_count, 1);
		}
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!drm_gem_is_imported(obj))
		drm_gem_shmem_unpin_locked(shmem);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);

/*
 * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
 * drops to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
				 struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
			vunmap(shmem->vaddr);
			shmem->vaddr = NULL;

			drm_gem_shmem_unpin_locked(shmem);
		}
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);

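/*
 * Illustrative sketch (the "shmem" pointer is assumed to be a valid
 * object): create a kernel mapping under the reservation lock, clear the
 * buffer through it, and drop the mapping again.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap_locked(shmem, &map);
 *	if (!ret) {
 *		iosys_map_memset(&map, 0, 0, shmem->base.size);
 *		drm_gem_shmem_vunmap_locked(shmem, &map);
 *	}
 *	dma_resv_unlock(shmem->base.resv);
 */
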
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an ID in the IDR table under which the object is
	 * registered; this ID, returned in @handle, is what userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/* Update madvise status. Returns true if the buffer has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);

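/*
 * Illustrative sketch (a hypothetical driver madvise ioctl): drivers
 * typically update the madvise state under the reservation lock and
 * report back to userspace whether the buffer is still retained.
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	args->retained = drm_gem_shmem_madvise_locked(shmem, args->madv);
 *	dma_resv_unlock(shmem->base.resv);
 */
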
void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);

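/*
 * Illustrative shrinker-style sketch: purge a buffer that was previously
 * marked as unneeded via drm_gem_shmem_madvise_locked(), but only if the
 * reservation lock can be taken without blocking.
 *
 *	if (dma_resv_trylock(shmem->base.resv)) {
 *		if (drm_gem_shmem_is_purgeable(shmem))
 *			drm_gem_shmem_purge_locked(shmem);
 *		dma_resv_unlock(shmem->base.resv);
 *	}
 */
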
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

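/*
 * Illustrative sketch (a hypothetical 64-byte pitch requirement):
 * hardware with extra pitch constraints can fix up @args first and then
 * reuse the helper above as the tail of its &drm_driver.dumb_create
 * callback.
 *
 *	static int mydrv_dumb_create(struct drm_file *file,
 *				     struct drm_device *dev,
 *				     struct drm_mode_create_dumb *args)
 *	{
 *		args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		return drm_gem_shmem_dumb_create(file, dev, args);
 *	}
 */
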
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (i.e. on fork()).
	 */
	drm_WARN_ON_ONCE(obj->dev,
			 !refcount_inc_not_zero(&shmem->pages_use_count));

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (drm_gem_is_imported(obj)) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (drm_gem_is_imported(&shmem->base))
		return;

	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages_locked(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

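/*
 * Illustrative sketch (mydrv_map_segment() is a hypothetical driver
 * helper): fetch the dma-mapped table and walk its DMA segments, e.g. to
 * build hardware page tables.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		mydrv_map_segment(sg_dma_address(sg), sg_dma_len(sg));
 */
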
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

/**
 * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
 * @dev: Device to import into
 * @dma_buf: dma-buf object to import
 *
 * Drivers that use the shmem helpers but want to import dma-bufs without
 * mapping their sg_table can use this as their &drm_driver.gem_prime_import
 * implementation.
 */
struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
							 struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	size_t size;
	int ret;

	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
		/*
		 * Importing a dmabuf exported from our own gem increases
		 * the refcount on the gem itself instead of the f_count of
		 * the dmabuf.
		 */
		obj = dma_buf->priv;
		drm_gem_object_get(obj);
		return obj;
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	size = PAGE_ALIGN(attach->dmabuf->size);

	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
	if (IS_ERR(shmem)) {
		ret = PTR_ERR(shmem);
		goto fail_detach;
	}

	drm_dbg_prime(dev, "size = %zu\n", size);

	shmem->base.import_attach = attach;
	shmem->base.resv = dma_buf->resv;

	return &shmem->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);

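/*
 * Illustrative sketch: wiring the no-map import path into a driver (the
 * "mydrv" name is hypothetical); the remaining fields follow the sketch
 * near the top of this file.
 *
 *	static const struct drm_driver mydrv_driver = {
 *		.gem_prime_import = drm_gem_shmem_prime_import_no_map,
 *	};
 */
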
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL v2");