// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS("DMA_BUF");

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For the GEM callback helpers in struct &drm_gem_object functions, see the
 * likewise-named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap()
 * wraps drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
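
/*
 * A minimal wiring sketch (not part of this library; the "foo" names are
 * hypothetical): drivers usually allocate objects through
 * drm_gem_shmem_create() and let &drm_gem_shmem_funcs below supply the
 * &drm_gem_object_funcs callbacks, pulling in the default dumb-buffer and
 * PRIME import helpers via the DRM_GEM_SHMEM_DRIVER_OPS convenience macro
 * from <drm/drm_gem_shmem_helper.h>:
 *
 *	static const struct drm_driver foo_drm_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *		.name = "foo",
 *	};
 */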

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
				size_t size, bool private, struct vfsmount *gemfs)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		return ret;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See the comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return 0;
err_release:
	drm_gem_object_release(obj);
	return ret;
}

/**
 * drm_gem_shmem_init - Initialize an allocated object.
 * @dev: DRM device
 * @shmem: The allocated shmem GEM object.
 * @size: Buffer size in bytes
 *
 * Returns:
 * 0 on success, or a negative error code on failure.
 */
int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
{
	return __drm_gem_shmem_init(dev, shmem, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_init);

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
		       struct vfsmount *gemfs)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return shmem;
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
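
/*
 * A usage sketch (the "foo_" names are hypothetical): allocate a
 * shmem-backed object and publish it to userspace through a GEM handle.
 * Once drm_gem_handle_create() succeeds, the handle owns the reference
 * that the allocation returned, mirroring drm_gem_shmem_create_with_handle()
 * below.
 *
 *	static int foo_create_bo(struct drm_file *file, struct drm_device *dev,
 *				 size_t size, u32 *handle)
 *	{
 *		struct drm_gem_shmem_object *shmem;
 *		int ret;
 *
 *		shmem = drm_gem_shmem_create(dev, size);
 *		if (IS_ERR(shmem))
 *			return PTR_ERR(shmem);
 *
 *		ret = drm_gem_handle_create(file, &shmem->base, handle);
 *		drm_gem_object_put(&shmem->base);
 *
 *		return ret;
 *	}
 */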

/**
 * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
 * given mountpoint
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @gemfs: tmpfs mount where the GEM object will be created
 *
 * This function creates a shmem GEM object in a given tmpfs mountpoint.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
							   size_t size,
							   struct vfsmount *gemfs)
{
	return __drm_gem_shmem_create(dev, size, false, gemfs);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);

/**
 * drm_gem_shmem_release - Release resources associated with a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function cleans up the GEM object state, but does not free the memory used to store the
 * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
 */
void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (drm_gem_is_imported(obj)) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages_locked(shmem);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_release);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	drm_gem_shmem_release(shmem);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_inc_not_zero(&shmem->pages_use_count))
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	refcount_set(&shmem->pages_use_count, 1);

	return 0;
}

/*
 * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
		if (shmem->map_wc)
			set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

		drm_gem_put_pages(obj, shmem->pages,
				  shmem->pages_mark_dirty_on_put,
				  shmem->pages_mark_accessed_on_put);
		shmem->pages = NULL;
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (!ret)
		refcount_set(&shmem->pages_pin_count, 1);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_pin_count))
		drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_dec_not_one(&shmem->pages_pin_count))
		return;

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
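
/*
 * A pinning sketch (the "foo_" names are hypothetical): pin the backing
 * pages for as long as the hardware works on the buffer, then unpin when
 * the job retires. The pin count nests, so concurrent users are fine.
 *
 *	static int foo_job_run(struct drm_gem_shmem_object *shmem)
 *	{
 *		int ret;
 *
 *		ret = drm_gem_shmem_pin(shmem);
 *		if (ret)
 *			return ret;
 *
 *		// ... submit the buffer to the hardware ...
 *
 *		drm_gem_shmem_unpin(shmem);
 *		return 0;
 *	}
 */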

/*
 * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
			      struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_pin_locked(shmem);
		if (ret)
			return ret;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr) {
			ret = -ENOMEM;
		} else {
			iosys_map_set_vaddr(map, shmem->vaddr);
			refcount_set(&shmem->vmap_use_count, 1);
		}
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!drm_gem_is_imported(obj))
		drm_gem_shmem_unpin_locked(shmem);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);

/*
 * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
 * drops to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
				 struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
			vunmap(shmem->vaddr);
			shmem->vaddr = NULL;

			drm_gem_shmem_unpin_locked(shmem);
		}
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
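
/*
 * A mapping sketch (the "foo_" names are hypothetical): take the
 * reservation lock, create a kernel mapping, access the buffer through the
 * iosys_map helpers, then drop the mapping and the lock again.
 *
 *	static int foo_fill(struct drm_gem_shmem_object *shmem,
 *			    const void *src, size_t len)
 *	{
 *		struct iosys_map map;
 *		int ret;
 *
 *		dma_resv_lock(shmem->base.resv, NULL);
 *
 *		ret = drm_gem_shmem_vmap_locked(shmem, &map);
 *		if (!ret) {
 *			iosys_map_memcpy_to(&map, 0, src, len);
 *			drm_gem_shmem_vunmap_locked(shmem, &map);
 *		}
 *
 *		dma_resv_unlock(shmem->base.resv);
 *		return ret;
 *	}
 */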

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an ID from the IDR table, where the object is registered;
	 * the resulting handle is the ID that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* Drop the reference from allocation; the handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/*
 * Update the madvise status. Returns true if the object's backing pages
 * have not been purged, false otherwise.
 */
int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
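
/*
 * A reclaim sketch (the "foo_" names are hypothetical): a driver shrinker
 * reclaims objects that userspace previously marked purgeable through a
 * driver madvise ioctl (via drm_gem_shmem_madvise_locked()), in the way
 * panfrost-style drivers do.
 *
 *	static bool foo_try_purge(struct drm_gem_shmem_object *shmem)
 *	{
 *		bool purged = false;
 *
 *		if (!dma_resv_trylock(shmem->base.resv))
 *			return false;
 *
 *		if (drm_gem_shmem_is_purgeable(shmem)) {
 *			drm_gem_shmem_purge_locked(shmem);
 *			purged = true;
 *		}
 *
 *		dma_resv_unlock(shmem->base.resv);
 *		return purged;
 *	}
 */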

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	int ret;

	ret = drm_mode_size_dumb(dev, args, 0, 0);
	if (ret)
		return ret;

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
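
/*
 * A wiring sketch ("foo" is hypothetical): hardware without extra pitch
 * restrictions can point &drm_driver.dumb_create straight at this helper,
 * as the kernel-doc above describes.
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM,
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *	};
 */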

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	drm_WARN_ON_ONCE(obj->dev,
			 !refcount_inc_not_zero(&shmem->pages_use_count));

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (drm_gem_is_imported(obj)) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
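
/*
 * A file-operations sketch ("foo" is hypothetical): the mmap path reaches
 * drm_gem_shmem_mmap() through drm_gem_mmap() and the
 * &drm_gem_object_funcs.mmap callback installed by this library, so a
 * driver typically only needs the stock GEM fops.
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		.fops = &foo_fops,
 *		// ...
 *	};
 */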

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (drm_gem_is_imported(&shmem->base))
		return;

	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages_locked(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at the backing storage, and it
 * hides the differences between dma-buf imported and natively allocated
 * objects. drm_gem_shmem_get_sg_table() should not be called directly by
 * drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
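
/*
 * A DMA-programming sketch (the "foo_" names are hypothetical): fetch the
 * cached, dma-mapped sg_table and walk its DMA segments when filling
 * hardware page tables or descriptor rings.
 *
 *	static int foo_map_bo(struct drm_gem_shmem_object *shmem)
 *	{
 *		struct sg_table *sgt;
 *		struct scatterlist *sg;
 *		unsigned int i;
 *
 *		sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *		if (IS_ERR(sgt))
 *			return PTR_ERR(sgt);
 *
 *		for_each_sgtable_dma_sg(sgt, sg, i)
 *			foo_hw_map(sg_dma_address(sg), sg_dma_len(sg));
 *
 *		return 0;
 *	}
 */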

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
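
/*
 * A PRIME wiring sketch ("foo" is hypothetical): installing this helper as
 * the &drm_driver.gem_prime_import_sg_table callback, which is also what
 * the DRM_GEM_SHMEM_DRIVER_OPS macro does, lets the core PRIME code wrap
 * imported dma-bufs in shmem GEM objects.
 *
 *	static const struct drm_driver foo_driver = {
 *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *		// ...
 *	};
 */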

/**
 * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
 * @dev: Device to import into
 * @dma_buf: dma-buf object to import
 *
 * Drivers that use the shmem helpers but also want to import a dmabuf without
 * mapping its sg_table can use this as their &drm_driver.gem_prime_import
 * implementation.
 */
struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
							 struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	size_t size;
	int ret;

	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
		/*
		 * Importing a dmabuf exported from our own gem increases the
		 * refcount on the gem itself instead of the f_count of the dmabuf.
		 */
		obj = dma_buf->priv;
		drm_gem_object_get(obj);
		return obj;
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	size = PAGE_ALIGN(attach->dmabuf->size);

	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
	if (IS_ERR(shmem)) {
		ret = PTR_ERR(shmem);
		goto fail_detach;
	}

	drm_dbg_prime(dev, "size = %zu\n", size);

	shmem->base.import_attach = attach;
	shmem->base.resv = dma_buf->resv;

	return &shmem->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
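
/*
 * A no-map import wiring sketch ("foo" is hypothetical): drivers that never
 * need a dma-mapped sg_table for imported buffers can plug this helper into
 * &drm_driver.gem_prime_import, as the kernel-doc above suggests.
 *
 *	static const struct drm_driver foo_driver = {
 *		.gem_prime_import = drm_gem_shmem_prime_import_no_map,
 *		// ...
 *	};
 */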

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL");