/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <linux/fs_context.h>
#endif
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void drm_gem_huge_mnt_free(struct drm_device *dev, void *data)
{
	kern_unmount(dev->huge_mnt);
}

/**
 * drm_gem_huge_mnt_create - Create, mount and use a huge tmpfs mountpoint
 * @dev: DRM device that will use the huge tmpfs mountpoint
 * @value: huge tmpfs mount option value
 *
 * This function creates and mounts a dedicated huge tmpfs mountpoint for the
 * lifetime of the DRM device @dev, which is then used at GEM object
 * initialization by drm_gem_object_init().
 *
 * The most common option for @value is "within_size", which only allocates
 * huge pages if they fit fully within the GEM object size. "always",
 * "advise" and "never" are supported too, but the latter would just create a
 * mountpoint similar to the default one (`shm_mnt`). See the shmem and
 * Transparent Hugepage documentation for more information.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_huge_mnt_create(struct drm_device *dev, const char *value)
{
	struct file_system_type *type;
	struct fs_context *fc;
	int ret;

	if (unlikely(drm_gem_get_huge_mnt(dev)))
		return 0;

	type = get_fs_type("tmpfs");
	if (unlikely(!type))
		return -EOPNOTSUPP;
	fc = fs_context_for_mount(type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return PTR_ERR(fc);
	ret = vfs_parse_fs_string(fc, "source", "tmpfs");
	if (unlikely(ret)) {
		/* Don't leak the fs_context on parse errors. */
		put_fs_context(fc);
		return -ENOPARAM;
	}
	ret = vfs_parse_fs_string(fc, "huge", value);
	if (unlikely(ret)) {
		put_fs_context(fc);
		return -ENOPARAM;
	}

	dev->huge_mnt = fc_mount_longterm(fc);
	put_fs_context(fc);
	if (IS_ERR(dev->huge_mnt)) {
		ret = PTR_ERR(dev->huge_mnt);
		dev->huge_mnt = NULL;
		return ret;
	}

	return drmm_add_action_or_reset(dev, drm_gem_huge_mnt_free, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_huge_mnt_create);
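
/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * that wants transparent-hugepage backing for its shmem GEM objects would
 * set the mountpoint up once at device creation, before initializing any
 * GEM objects:
 *
 *	ret = drm_gem_huge_mnt_create(ddev, "within_size");
 *	if (ret)
 *		return ret;
 */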
#endif

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager)
		return -ENOMEM;

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 *
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store. A huge mountpoint can be used by calling
 * drm_gem_huge_mnt_create() beforehand.
 */
int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
			size_t size)
{
	struct vfsmount *huge_mnt;
	struct file *filp;
	const vma_flags_t flags = mk_vma_flags(VMA_NORESERVE_BIT);

	drm_gem_private_object_init(dev, obj, size);

	huge_mnt = drm_gem_get_huge_mnt(dev);
	if (huge_mnt)
		filp = shmem_file_setup_with_mnt(huge_mnt, "drm mm object",
						 size, flags);
	else
		filp = shmem_file_setup("drm mm object", size, flags);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
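
/*
 * Usage sketch (hypothetical driver code, not part of this file): embed a
 * struct drm_gem_object in a driver buffer object and give it page-aligned
 * shmem backing; drm_gem_object_release() is the matching cleanup:
 *
 *	ret = drm_gem_object_init(dev, &bo->base, round_up(size, PAGE_SIZE));
 *	if (ret)
 *		return ret;
 */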

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	mutex_init(&obj->gpuva.lock);
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
		drm_gem_gpuva_init(obj);

	drm_vma_node_reset(&obj->vma_node);
	INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object whose initialization failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	dma_resv_fini(&obj->_resv);
	mutex_destroy(&obj->gpuva.lock);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
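
/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * that provides its own backing store pairs the private init with this
 * cleanup on its error path:
 *
 *	drm_gem_private_object_init(dev, obj, size);
 *	ret = my_setup_backing_store(obj);	(hypothetical helper)
 *	if (ret) {
 *		drm_gem_private_object_fini(obj);
 *		return ret;
 *	}
 */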

static void drm_gem_object_handle_get(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));

	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);
}

/**
 * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
 * @obj: GEM object
 *
 * Acquires a reference on the GEM buffer object's handle. Required to keep
 * the GEM object alive. Call drm_gem_object_handle_put_if_exists_unlocked()
 * to release the reference. Does nothing if the buffer object has no handle.
 *
 * Returns:
 * True if a handle exists, or false otherwise
 */
bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	guard(mutex)(&dev->object_name_lock);

	/*
	 * First ref taken during GEM object creation, if any. Some
	 * drivers set up internal framebuffers with GEM objects that
	 * do not have a GEM handle. Hence, this counter can be zero.
	 */
	if (!obj->handle_count)
		return false;

	drm_gem_object_handle_get(obj);

	return true;
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free() or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

/**
 * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
 * @obj: GEM object
 *
 * Releases a reference on the GEM buffer object's handle. Possibly releases
 * the GEM buffer object and associated dma-buf objects.
 */
void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before
	 * we checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (drm_WARN_ON(obj->dev, !data))
		return 0;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	mutex_lock(&file_priv->prime.lock);

	drm_prime_remove_buf_handle(&file_priv->prime, id);

	mutex_unlock(&file_priv->prime.lock);

	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (drm_gem_is_imported(obj)) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	drm_gem_object_handle_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	/* mirrors drm_gem_handle_delete to avoid races */
	spin_lock(&file_priv->table_lock);
	obj = idr_replace(&file_priv->object_idr, obj, handle);
	WARN_ON(obj != NULL);
	spin_unlock(&file_priv->table_lock);
	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
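
/*
 * Usage sketch (hypothetical driver code, not part of this file): a typical
 * bo-create ioctl publishes the fully constructed object and then drops its
 * local reference, leaving the handle as the only userspace-visible ref:
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *	drm_gem_object_put(obj);	(the handle now owns a reference)
 *	return ret;
 */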

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move folios to appropriate lru and release the folios, decrementing the
 * ref count of those folios.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page **pages;
	struct folio *folio;
	struct folio_batch fbatch;
	unsigned long i, j, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_objs(struct page *, npages);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	i = 0;
	while (i < npages) {
		unsigned long nr;

		folio = shmem_read_folio_gfp(mapping, i,
					     mapping_gfp_mask(mapping));
		if (IS_ERR(folio))
			goto fail;
		nr = min(npages - i, folio_nr_pages(folio));
		for (j = 0; j < nr; j++, i++)
			pages[i] = folio_file_page(folio, i);

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (folio_pfn(folio) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	folio_batch_init(&fbatch);
	j = 0;
	while (j < i) {
		struct folio *f = page_folio(pages[j]);

		if (!folio_batch_add(&fbatch, f))
			drm_gem_check_release_batch(&fbatch);
		j += folio_nr_pages(f);
	}
	if (fbatch.nr)
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
	return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct folio_batch fbatch;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	for (i = 0; i < npages; i++) {
		struct folio *folio;

		if (!pages[i])
			continue;
		folio = page_folio(pages[i]);

		if (dirty)
			folio_mark_dirty(folio);

		if (accessed)
			folio_mark_accessed(folio);

		/* Undo the reference we took when populating the table */
		if (!folio_batch_add(&fbatch, folio))
			drm_gem_check_release_batch(&fbatch);
		i += folio_nr_pages(folio) - 1;
	}
	if (folio_batch_count(&fbatch))
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
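
/*
 * Usage sketch (hypothetical driver code, not part of this file): pin the
 * backing pages while they are needed, then release them again, marking
 * them dirty if the GPU may have written to them:
 *
 *	struct page **pages = drm_gem_get_pages(obj);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	(... build an sg-table or otherwise map the pages ...)
 *	drm_gem_put_pages(obj, pages, true, true);
 */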

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to an array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * 0 on success or a negative error code on failure. On success, @objs_out is
 * filled in with GEM object pointers; they must be released with
 * drm_gem_object_put(). -ENOENT is returned if any handle lookup fails.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	struct drm_gem_object **objs;
	u32 *handles;
	int ret;

	if (!count)
		return 0;

	objs = kvmalloc_objs(struct drm_gem_object *, count,
			     GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = vmemdup_array_user(bo_handles, count, sizeof(u32));
	if (IS_ERR(handles))
		return PTR_ERR(handles);

	ret = objects_lookup(filp, handles, count, objs);
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
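
/*
 * Usage sketch (hypothetical driver code, not part of this file): resolve a
 * user-supplied handle array for a submit ioctl and release the references
 * when the job no longer needs them (drm_gem_object_put() ignores NULL
 * entries, so this also cleans up after a partial lookup failure):
 *
 *	struct drm_gem_object **objs = NULL;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	(...)
 *	for (i = 0; i < args->bo_count; i++)
 *		drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 */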

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, -EINVAL
 * if the handle lookup failed, or 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	struct drm_device *dev = filep->minor->dev;
	struct drm_gem_object *obj;
	long ret;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
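
/*
 * Usage sketch (hypothetical driver code, not part of this file): a simple
 * bo-wait ioctl built on this helper; the uAPI struct is hypothetical,
 * drm_timeout_abs_to_jiffies() is the existing DRM helper for converting
 * an absolute timeout into jiffies:
 *
 *	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *	return drm_gem_dma_resv_wait(file_priv, args->handle, true, timeout);
 */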

int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_gem_change_handle *args = data;
	struct drm_gem_object *obj;
	int handle, ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	/* idr_alloc() limitation. */
	if (args->new_handle > INT_MAX)
		return -EINVAL;
	handle = args->new_handle;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->handle == handle) {
		ret = 0;
		goto out;
	}

	mutex_lock(&file_priv->prime.lock);

	spin_lock(&file_priv->table_lock);
	ret = idr_alloc(&file_priv->object_idr, obj, handle, handle + 1,
			GFP_NOWAIT);
	spin_unlock(&file_priv->table_lock);

	if (ret < 0)
		goto out_unlock;

	if (obj->dma_buf) {
		ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf,
					       handle);
		if (ret < 0) {
			spin_lock(&file_priv->table_lock);
			idr_remove(&file_priv->object_idr, handle);
			spin_unlock(&file_priv->table_lock);
			goto out_unlock;
		}

		drm_prime_remove_buf_handle(&file_priv->prime, args->handle);
	}

	ret = 0;

	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, args->handle);
	spin_unlock(&file_priv->table_lock);

out_unlock:
	mutex_unlock(&file_priv->prime.lock);
out:
	drm_gem_object_put(obj);

	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);

	drm_gem_private_object_fini(obj);

	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
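
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * &dma_buf_ops.mmap callback built on this helper, mapping the whole
 * object starting at offset 0:
 *
 *	static int my_prime_mmap(struct dma_buf *dma_buf,
 *				 struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *
 *		return drm_gem_mmap_obj(obj, obj->size, vma);
 *	}
 */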

/*
 * Look up a GEM object in offset space based on the exact start address. The
 * caller must be granted access to the object. Returns a GEM object on success
 * or a negative error code on failure. The returned GEM object needs to be
 * released with drm_gem_object_put().
 */
static struct drm_gem_object *
drm_gem_object_lookup_at_offset(struct file *filp, unsigned long start,
				unsigned long pages)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return ERR_PTR(-ENODEV);

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  start, pages);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return ERR_PTR(-EINVAL);

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return ERR_PTR(-EACCES);
	}

	return obj;
}

#ifdef CONFIG_MMU
/**
 * drm_gem_get_unmapped_area - get memory mapping region routine for GEM objects
 * @filp: DRM file pointer
 * @uaddr: User address hint
 * @len: Mapping length
 * @pgoff: Offset (in pages)
 * @flags: Mapping flags
 *
 * If a driver supports GEM object mapping, before ending up in drm_gem_mmap(),
 * mmap calls on the DRM file descriptor will first try to find a free linear
 * address space large enough for a mapping. Since GEM objects are backed by
 * shmem buffers, this should preferably be handled by the shmem virtual memory
 * filesystem which can appropriately align addresses to huge page sizes when
 * needed.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created) and call shmem_get_unmapped_area() with
 * the right file pointer.
 *
 * If a GEM object is not available at the given offset or if the caller is not
 * granted access to it, fall back to mm_get_unmapped_area().
 */
unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct drm_gem_object *obj;
	unsigned long ret;

	obj = drm_gem_object_lookup_at_offset(filp, pgoff, len >> PAGE_SHIFT);
	if (IS_ERR(obj))
		obj = NULL;

	if (!obj || !obj->filp || !obj->filp->f_op->get_unmapped_area)
		ret = mm_get_unmapped_area(filp, uaddr, len, 0, flags);
	else
		ret = obj->filp->f_op->get_unmapped_area(obj->filp, uaddr, len, 0, flags);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_get_unmapped_area);
#endif

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created) and map it with a call to
 * drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup_at_offset(filp, vma->vm_pgoff,
					      vma_pages(vma));
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_mmap_obj(obj,
			       drm_vma_node_size(&obj->vma_node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(drm_gem_is_imported(obj)));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_assert_held(obj->resv);

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap_locked);

void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_assert_held(obj->resv);

	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap_locked);

void drm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}
EXPORT_SYMBOL(drm_gem_lock);

void drm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_unlock);

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap_locked(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap_locked(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap);
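
/*
 * Usage sketch (hypothetical driver code, not part of this file): map an
 * object into kernel address space, access it through the iosys_map, and
 * unmap it again:
 *
 *	struct iosys_map map;
 *	int ret = drm_gem_vmap(obj, &map);
 *
 *	if (ret)
 *		return ret;
 *	iosys_map_memcpy_to(&map, 0, src, len);
 *	drm_gem_vunmap(obj, &map);
 */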

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
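
/*
 * Usage sketch (hypothetical driver code, not part of this file): the
 * lock/submit/unlock pattern described in the kernel-doc above:
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret = drm_gem_lock_reservations(objs, count, &ctx);
 *
 *	if (ret)
 *		return ret;
 *	(... reserve fence slots and push the job to the scheduler ...)
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */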

/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like &drm_gem_lru_move_tail but lru lock must be held
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * moves the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or, if necessary, to block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 * @ticket: Optional ww_acquire_ctx context to use for locking
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
		 unsigned int nr_to_scan,
		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
		 struct ww_acquire_ctx *ticket)
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it. So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock
		 */
		mutex_unlock(lru->lock);

		if (ticket)
			ww_acquire_init(ticket, &reservation_ww_class);

		/*
		 * Note that this still needs to be a trylock, since we can
		 * hit the shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held).
		 */
		if (!ww_mutex_trylock(&obj->resv->lock, ticket)) {
			*remaining += obj->size >> PAGE_SHIFT;
			goto tail;
		}

		if (shrink(obj, ticket)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

		if (ticket)
			ww_acquire_fini(ticket);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU
	 */
	list_for_each_entry(obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
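
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * &shrinker.scan_objects implementation built on this helper; my_shrink_cb
 * and the device structure are assumptions, SHRINK_STOP is the standard
 * sentinel for "nothing more to reclaim":
 *
 *	static unsigned long
 *	my_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 *	{
 *		struct my_device *mydev = shrinker->private_data;
 *		unsigned long remaining = 0;
 *		unsigned long freed;
 *
 *		freed = drm_gem_lru_scan(&mydev->lru, sc->nr_to_scan,
 *					 &remaining, my_shrink_cb, NULL);
 *
 *		return freed ?: SHRINK_STOP;
 *	}
 */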

/**
 * drm_gem_evict_locked - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict_locked(struct drm_gem_object *obj)
{
	dma_resv_assert_held(obj->resv);

	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
		return -EBUSY;

	if (obj->funcs->evict)
		return obj->funcs->evict(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_evict_locked);