// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <kunit/visibility.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For the GEM callbacks in struct &drm_gem_object_funcs, use the likewise-named
 * functions with an _object_ infix (e.g. drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()); these helpers perform the necessary type conversion.
 */
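
/*
 * Example: a minimal sketch of embedding the shmem object in a driver
 * object and wiring up per-driver callbacks. The "foo" names are made up
 * for illustration; only the drm_gem_shmem_* symbols are real. Drivers
 * without custom callbacks can skip the funcs table entirely, as
 * __drm_gem_shmem_init() installs drm_gem_shmem_funcs by default:
 *
 *	struct foo_gem_object {
 *		struct drm_gem_shmem_object base;
 *	};
 *
 *	static const struct drm_gem_object_funcs foo_gem_funcs = {
 *		.free = drm_gem_shmem_object_free,
 *		.pin = drm_gem_shmem_object_pin,
 *		.unpin = drm_gem_shmem_object_unpin,
 *		.get_sg_table = drm_gem_shmem_object_get_sg_table,
 *		.vmap = drm_gem_shmem_object_vmap,
 *		.vunmap = drm_gem_shmem_object_vunmap,
 *		.mmap = drm_gem_shmem_object_mmap,
 *		.vm_ops = &drm_gem_shmem_vm_ops,
 *	};
 */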

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
				size_t size, bool private)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		return ret;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return 0;

err_release:
	drm_gem_object_release(obj);
	return ret;
}

/**
 * drm_gem_shmem_init - Initialize an allocated object
 * @dev: DRM device
 * @shmem: shmem GEM object to initialize
 * @size: Buffer size in bytes
 *
 * This function initializes an allocated shmem GEM object.
 *
 * Returns:
 * 0 on success, or a negative error code on failure.
 */
int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
{
	return __drm_gem_shmem_init(dev, shmem, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_init);

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc_obj(*shmem);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	ret = __drm_gem_shmem_init(dev, shmem, size, private);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return shmem;
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
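
/*
 * Example: a minimal, hypothetical sketch of creating an object and
 * dropping the reference again once it is no longer needed. Error
 * handling follows the ERR_PTR() convention documented above:
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 * Use &shmem->base wherever a struct drm_gem_object is expected, then:
 *
 *	drm_gem_object_put(&shmem->base);
 */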

/**
 * drm_gem_shmem_release - Release resources associated with a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function cleans up the GEM object state, but does not free the memory used to store the
 * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
 */
void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (drm_gem_is_imported(obj)) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages_locked(shmem);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_release);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	drm_gem_shmem_release(shmem);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_inc_not_zero(&shmem->pages_use_count))
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	refcount_set(&shmem->pages_use_count, 1);

	return 0;
}

/*
 * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the count drops to zero.
 */
void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
		if (shmem->map_wc)
			set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

		drm_gem_put_pages(obj, shmem->pages,
				  shmem->pages_mark_dirty_on_put,
				  shmem->pages_mark_accessed_on_put);
		shmem->pages = NULL;
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (!ret)
		refcount_set(&shmem->pages_pin_count, 1);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_pin_count))
		drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);
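
/*
 * Example: a hedged sketch of the pin/unpin pairing around a device
 * access. foo_hw_access() is a made-up placeholder for whatever the
 * driver does while the pages must not move:
 *
 *	int ret;
 *
 *	ret = drm_gem_shmem_pin(shmem);
 *	if (ret)
 *		return ret;
 *	foo_hw_access(shmem);
 *	drm_gem_shmem_unpin(shmem);
 */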

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_dec_not_one(&shmem->pages_pin_count))
		return;

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);

/*
 * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
			      struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_pin_locked(shmem);
		if (ret)
			return ret;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr) {
			ret = -ENOMEM;
		} else {
			iosys_map_set_vaddr(map, shmem->vaddr);
			refcount_set(&shmem->vmap_use_count, 1);
		}
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!drm_gem_is_imported(obj))
		drm_gem_shmem_unpin_locked(shmem);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);

/*
 * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
 * drops to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
				 struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
			vunmap(shmem->vaddr);
			shmem->vaddr = NULL;

			drm_gem_shmem_unpin_locked(shmem);
		}
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
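
/*
 * Example: a minimal sketch of mapping a buffer into the kernel, filling
 * it from a source buffer and unmapping it again. Both calls require the
 * reservation lock; src and len are assumed to be provided by the caller:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap_locked(shmem, &map);
 *	if (!ret) {
 *		iosys_map_memcpy_to(&map, 0, src, len);
 *		drm_gem_shmem_vunmap_locked(shmem, &map);
 *	}
 *	dma_resv_unlock(shmem->base.resv);
 */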

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
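
/*
 * Example: a hedged sketch of how a driver shrinker might use the madvise
 * and purge helpers. How the driver tracks purgeable objects (e.g. via
 * shmem->madv_list) is up to the driver; the locking rule shown here is
 * real, as both helpers require the object's reservation lock:
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge_locked(shmem);
 *	dma_resv_unlock(shmem->base.resv);
 */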

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	int ret;

	ret = drm_mode_size_dumb(dev, args, 0, 0);
	if (ret)
		return ret;

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
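
/*
 * Example: hooking the helper into a driver. The foo_driver structure is
 * illustrative; drivers typically set the callback directly as shown, or
 * use the DRM_GEM_SHMEM_DRIVER_OPS convenience macro from the helper
 * header, which sets it up together with the PRIME import callback:
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *	};
 */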

static bool drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
				      struct page *page)
{
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
	unsigned long pfn = page_to_pfn(page);
	unsigned long paddr = pfn << PAGE_SHIFT;
	bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);

	if (aligned &&
	    pmd_none(*vmf->pmd) &&
	    folio_test_pmd_mappable(page_folio(page))) {
		pfn &= PMD_MASK >> PAGE_SHIFT;
		if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE)
			return true;
	}
#endif

	return false;
}

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page **pages = shmem->pages;
	pgoff_t page_offset;
	unsigned long pfn;

	/* Offset to faulty address in the VMA. */
	page_offset = vmf->pgoff - vma->vm_pgoff;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset])) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	pfn = page_to_pfn(pages[page_offset]);
	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out:
	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	drm_WARN_ON_ONCE(obj->dev,
			 !refcount_inc_not_zero(&shmem->pages_use_count));

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (drm_gem_is_imported(obj)) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (drm_gem_is_imported(&shmem->base))
		return;

	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages_locked(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
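
/*
 * Example: a minimal sketch of fetching the sg table and walking the
 * DMA-mapped entries, e.g. to program device page tables. The iterator
 * and accessors are the standard scatterlist API; foo_hw_map_page() is a
 * made-up placeholder:
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		foo_hw_map_page(sg_dma_address(sg), sg_dma_len(sg));
 */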

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

/**
 * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
 * @dev: Device to import into
 * @dma_buf: dma-buf object to import
 *
 * Drivers that use the shmem helpers but also want to import dmabuf without
 * mapping its sg_table can use this as their &drm_driver.gem_prime_import
 * implementation.
 */
struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
							 struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	size_t size;
	int ret;

	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
		/*
		 * Importing a dmabuf exported from our own gem increases the
		 * refcount on the gem itself instead of the f_count of the dmabuf.
		 */
		obj = dma_buf->priv;
		drm_gem_object_get(obj);
		return obj;
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	size = PAGE_ALIGN(attach->dmabuf->size);

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem)) {
		ret = PTR_ERR(shmem);
		goto fail_detach;
	}

	drm_dbg_prime(dev, "size = %zu\n", size);

	shmem->base.import_attach = attach;
	shmem->base.resv = dma_buf->resv;

	return &shmem->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
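
/*
 * Example: wiring the PRIME import paths into a driver. Drivers taking the
 * default, mapped path set &drm_driver.gem_prime_import_sg_table to
 * drm_gem_shmem_prime_import_sg_table() (or use the DRM_GEM_SHMEM_DRIVER_OPS
 * macro from the helper header); drivers that must not map the sg_table
 * override &drm_driver.gem_prime_import instead. foo_driver is illustrative:
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		.gem_prime_import = drm_gem_shmem_prime_import_no_map,
 *	};
 */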

/*
 * Kunit helpers
 */

#if IS_ENABLED(CONFIG_KUNIT)
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vmap);

void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	/* Not interruptible: a void helper cannot propagate -EINTR. */
	dma_resv_lock(obj->resv, NULL);
	drm_gem_shmem_vunmap_locked(shmem, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vunmap);

int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_madvise_locked(shmem, madv);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_madvise);

int drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (ret)
		return ret;
	drm_gem_shmem_purge_locked(shmem);
	dma_resv_unlock(obj->resv);

	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_purge);
#endif

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL");