xref: /linux/drivers/gpu/drm/drm_gem.c (revision 3c2fe27971c3c9cc27de6e369385f6428db6c0b5)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <linux/dma-buf.h>
29 #include <linux/file.h>
30 #include <linux/fs.h>
31 #include <linux/iosys-map.h>
32 #include <linux/mem_encrypt.h>
33 #include <linux/mm.h>
34 #include <linux/mman.h>
35 #include <linux/module.h>
36 #include <linux/pagemap.h>
37 #include <linux/pagevec.h>
38 #include <linux/shmem_fs.h>
39 #include <linux/slab.h>
40 #include <linux/string_helpers.h>
41 #include <linux/types.h>
42 #include <linux/uaccess.h>
43 
44 #include <drm/drm.h>
45 #include <drm/drm_device.h>
46 #include <drm/drm_drv.h>
47 #include <drm/drm_file.h>
48 #include <drm/drm_gem.h>
49 #include <drm/drm_managed.h>
50 #include <drm/drm_print.h>
51 #include <drm/drm_vma_manager.h>
52 
53 #include "drm_internal.h"
54 
55 /** @file drm_gem.c
56  *
57  * This file provides some of the base ioctls and library routines for
58  * the graphics memory manager implemented by each device driver.
59  *
60  * Because various devices have different requirements in terms of
61  * synchronization and migration strategies, implementing that is left up to
62  * the driver, and all that the general API provides should be generic --
63  * allocating objects, reading/writing data with the CPU, freeing objects.
64  * Even there, platform-dependent optimizations for reading/writing data with
65  * the CPU mean we'll likely hook those out to driver-specific calls.  However,
66  * the DRI2 implementation wants to have at least allocate/mmap be generic.
67  *
68  * The goal was to have swap-backed object allocation managed through
69  * struct file.  However, file descriptors as handles to a struct file have
70  * two major failings:
71  * - Process limits prevent more than 1024 or so being used at a time by
72  *   default.
73  * - Inability to allocate high fds will aggravate the X Server's select()
74  *   handling, and likely that of many GL client applications as well.
75  *
76  * This led to a plan of using our own integer IDs (called handles, following
77  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
78  * ioctls.  The objects themselves will still include the struct file so
79  * that we can transition to fds if the required kernel infrastructure shows
80  * up at a later date, and as our interface with shmfs for memory allocation.
81  */
82 
83 static void
84 drm_gem_init_release(struct drm_device *dev, void *ptr)
85 {
86 	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
87 }
88 
89 /**
90  * drm_gem_init - Initialize the GEM device fields
91  * @dev: drm_device structure to initialize
92  */
93 int
94 drm_gem_init(struct drm_device *dev)
95 {
96 	struct drm_vma_offset_manager *vma_offset_manager;
97 
98 	mutex_init(&dev->object_name_lock);
99 	idr_init_base(&dev->object_name_idr, 1);
100 
101 	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
102 					  GFP_KERNEL);
103 	if (!vma_offset_manager) {
104 		DRM_ERROR("out of memory\n");
105 		return -ENOMEM;
106 	}
107 
108 	dev->vma_offset_manager = vma_offset_manager;
109 	drm_vma_offset_manager_init(vma_offset_manager,
110 				    DRM_FILE_PAGE_OFFSET_START,
111 				    DRM_FILE_PAGE_OFFSET_SIZE);
112 
113 	return drmm_add_action(dev, drm_gem_init_release, NULL);
114 }
115 
116 /**
117  * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM
118  * object in a given shmfs mountpoint
119  *
120  * @dev: drm_device the object should be initialized for
121  * @obj: drm_gem_object to initialize
122  * @size: object size
123  * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use
124  * the usual tmpfs mountpoint (`shm_mnt`).
125  *
126  * Initialize an already allocated GEM object of the specified size with
127  * shmfs backing store.
128  */
129 int drm_gem_object_init_with_mnt(struct drm_device *dev,
130 				 struct drm_gem_object *obj, size_t size,
131 				 struct vfsmount *gemfs)
132 {
133 	struct file *filp;
134 
135 	drm_gem_private_object_init(dev, obj, size);
136 
137 	if (gemfs)
138 		filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size,
139 						 VM_NORESERVE);
140 	else
141 		filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
142 
143 	if (IS_ERR(filp))
144 		return PTR_ERR(filp);
145 
146 	obj->filp = filp;
147 
148 	return 0;
149 }
150 EXPORT_SYMBOL(drm_gem_object_init_with_mnt);
151 
152 /**
153  * drm_gem_object_init - initialize an allocated shmem-backed GEM object
154  * @dev: drm_device the object should be initialized for
155  * @obj: drm_gem_object to initialize
156  * @size: object size
157  *
158  * Initialize an already allocated GEM object of the specified size with
159  * shmfs backing store.
160  */
161 int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
162 			size_t size)
163 {
164 	return drm_gem_object_init_with_mnt(dev, obj, size, NULL);
165 }
166 EXPORT_SYMBOL(drm_gem_object_init);
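
/*
 * Usage sketch (illustrative; "my_obj_create" and "my_gem_funcs" are
 * assumed driver-side names, not DRM core API): a driver would
 * typically wrap drm_gem_object_init() in its object constructor.
 */
static struct drm_gem_object *my_obj_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->funcs = &my_gem_funcs;	/* the driver's &drm_gem_object_funcs */

	ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size));
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return obj;
}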
167 
168 /**
169  * drm_gem_private_object_init - initialize an allocated private GEM object
170  * @dev: drm_device the object should be initialized for
171  * @obj: drm_gem_object to initialize
172  * @size: object size
173  *
174  * Initialize an already allocated GEM object of the specified size with
175  * no GEM provided backing store. Instead the caller is responsible for
176  * backing the object and handling it.
177  */
178 void drm_gem_private_object_init(struct drm_device *dev,
179 				 struct drm_gem_object *obj, size_t size)
180 {
181 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
182 
183 	obj->dev = dev;
184 	obj->filp = NULL;
185 
186 	kref_init(&obj->refcount);
187 	obj->handle_count = 0;
188 	obj->size = size;
189 	dma_resv_init(&obj->_resv);
190 	if (!obj->resv)
191 		obj->resv = &obj->_resv;
192 
193 	if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
194 		drm_gem_gpuva_init(obj);
195 
196 	drm_vma_node_reset(&obj->vma_node);
197 	INIT_LIST_HEAD(&obj->lru_node);
198 }
199 EXPORT_SYMBOL(drm_gem_private_object_init);
200 
201 /**
202  * drm_gem_private_object_fini - Finalize a failed drm_gem_object
203  * @obj: drm_gem_object
204  *
205  * Uninitialize an already allocated GEM object when its initialization failed.
206  */
207 void drm_gem_private_object_fini(struct drm_gem_object *obj)
208 {
209 	WARN_ON(obj->dma_buf);
210 
211 	dma_resv_fini(&obj->_resv);
212 }
213 EXPORT_SYMBOL(drm_gem_private_object_fini);
214 
215 static void drm_gem_object_handle_get(struct drm_gem_object *obj)
216 {
217 	struct drm_device *dev = obj->dev;
218 
219 	drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));
220 
221 	if (obj->handle_count++ == 0)
222 		drm_gem_object_get(obj);
223 }
224 
225 /**
226  * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
227  * @obj: GEM object
228  *
229  * Acquires a reference on the GEM buffer object's handle. Required to keep
230  * the GEM object alive. Call drm_gem_object_handle_put_if_exists_unlocked()
231  * to release the reference. Does nothing if the buffer object has no handle.
232  *
233  * Returns:
234  * True if a handle exists, or false otherwise
235  */
236 bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
237 {
238 	struct drm_device *dev = obj->dev;
239 
240 	guard(mutex)(&dev->object_name_lock);
241 
242 	/*
243 	 * First ref taken during GEM object creation, if any. Some
244 	 * drivers set up internal framebuffers with GEM objects that
245 	 * do not have a GEM handle. Hence, this counter can be zero.
246 	 */
247 	if (!obj->handle_count)
248 		return false;
249 
250 	drm_gem_object_handle_get(obj);
251 
252 	return true;
253 }
254 
255 /**
256  * drm_gem_object_handle_free - release resources bound to userspace handles
257  * @obj: GEM object to clean up.
258  *
259  * Called after the last handle to the object has been closed
260  *
261  * Removes any name for the object. Note that this must be
262  * called before drm_gem_object_free or we'll be touching
263  * freed memory
264  */
265 static void drm_gem_object_handle_free(struct drm_gem_object *obj)
266 {
267 	struct drm_device *dev = obj->dev;
268 
269 	/* Remove any name for this object */
270 	if (obj->name) {
271 		idr_remove(&dev->object_name_idr, obj->name);
272 		obj->name = 0;
273 	}
274 }
275 
276 static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
277 {
278 	/* Unbreak the reference cycle if we have an exported dma_buf. */
279 	if (obj->dma_buf) {
280 		dma_buf_put(obj->dma_buf);
281 		obj->dma_buf = NULL;
282 	}
283 }
284 
285 /**
286  * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
287  * @obj: GEM object
288  *
289  * Releases a reference on the GEM buffer object's handle. Possibly releases
290  * the GEM buffer object and associated dma-buf objects.
291  */
292 void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
293 {
294 	struct drm_device *dev = obj->dev;
295 	bool final = false;
296 
297 	if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
298 		return;
299 
300 	/*
301 	 * Must bump handle count first as this may be the last
302 	 * ref, in which case the object would disappear before
303 	 * we checked for a name.
304 	 */
305 
306 	mutex_lock(&dev->object_name_lock);
307 	if (--obj->handle_count == 0) {
308 		drm_gem_object_handle_free(obj);
309 		drm_gem_object_exported_dma_buf_free(obj);
310 		final = true;
311 	}
312 	mutex_unlock(&dev->object_name_lock);
313 
314 	if (final)
315 		drm_gem_object_put(obj);
316 }
317 
318 /*
319  * Called at device or object close to release the file's
320  * handle references on objects.
321  */
322 static int
323 drm_gem_object_release_handle(int id, void *ptr, void *data)
324 {
325 	struct drm_file *file_priv = data;
326 	struct drm_gem_object *obj = ptr;
327 
328 	if (drm_WARN_ON(obj->dev, !data))
329 		return 0;
330 
331 	if (obj->funcs->close)
332 		obj->funcs->close(obj, file_priv);
333 
334 	drm_prime_remove_buf_handle(&file_priv->prime, id);
335 	drm_vma_node_revoke(&obj->vma_node, file_priv);
336 
337 	drm_gem_object_handle_put_unlocked(obj);
338 
339 	return 0;
340 }
341 
342 /**
343  * drm_gem_handle_delete - deletes the given file-private handle
344  * @filp: drm file-private structure to use for the handle look up
345  * @handle: userspace handle to delete
346  *
347  * Removes the GEM handle from the @filp lookup table which has been added with
348  * drm_gem_handle_create(). If this is the last handle also cleans up linked
349  * resources like GEM names.
350  */
351 int
352 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
353 {
354 	struct drm_gem_object *obj;
355 
356 	spin_lock(&filp->table_lock);
357 
358 	/* Check if we currently have a reference on the object */
359 	obj = idr_replace(&filp->object_idr, NULL, handle);
360 	spin_unlock(&filp->table_lock);
361 	if (IS_ERR_OR_NULL(obj))
362 		return -EINVAL;
363 
364 	/* Release driver's reference and decrement refcount. */
365 	drm_gem_object_release_handle(handle, obj, filp);
366 
367 	/* And finally make the handle available for future allocations. */
368 	spin_lock(&filp->table_lock);
369 	idr_remove(&filp->object_idr, handle);
370 	spin_unlock(&filp->table_lock);
371 
372 	return 0;
373 }
374 EXPORT_SYMBOL(drm_gem_handle_delete);
375 
376 /**
377  * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
378  * @file: drm file-private structure containing the gem object
379  * @dev: corresponding drm_device
380  * @handle: gem object handle
381  * @offset: return location for the fake mmap offset
382  *
383  * This implements the &drm_driver.dumb_map_offset kms driver callback for
384  * drivers which use gem to manage their backing storage.
385  *
386  * Returns:
387  * 0 on success or a negative error code on failure.
388  */
389 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
390 			    u32 handle, u64 *offset)
391 {
392 	struct drm_gem_object *obj;
393 	int ret;
394 
395 	obj = drm_gem_object_lookup(file, handle);
396 	if (!obj)
397 		return -ENOENT;
398 
399 	/* Don't allow imported objects to be mapped */
400 	if (drm_gem_is_imported(obj)) {
401 		ret = -EINVAL;
402 		goto out;
403 	}
404 
405 	ret = drm_gem_create_mmap_offset(obj);
406 	if (ret)
407 		goto out;
408 
409 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
410 out:
411 	drm_gem_object_put(obj);
412 
413 	return ret;
414 }
415 EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
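
/*
 * Usage sketch (illustrative): this helper is usually plugged straight
 * into &drm_driver. "my_dumb_create" is an assumed driver callback; a
 * sketch of one follows drm_gem_handle_create() below.
 */
static const struct drm_driver my_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	.dumb_create		= my_dumb_create,
	.dumb_map_offset	= drm_gem_dumb_map_offset,
	/* ... */
};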
416 
417 /**
418  * drm_gem_handle_create_tail - internal function to create a handle
419  * @file_priv: drm file-private structure to register the handle for
420  * @obj: object to register
421  * @handlep: pointer to return the created handle to the caller
422  *
423  * This expects the &drm_device.object_name_lock to be held already and will
424  * drop it before returning. Used to avoid races in establishing new handles
425  * when importing an object from either a flink name or a dma-buf.
426  *
427  * Handles must be released again through drm_gem_handle_delete(). This is done
428  * when userspace closes @file_priv for all attached handles, or through the
429  * GEM_CLOSE ioctl for individual handles.
430  */
431 int
432 drm_gem_handle_create_tail(struct drm_file *file_priv,
433 			   struct drm_gem_object *obj,
434 			   u32 *handlep)
435 {
436 	struct drm_device *dev = obj->dev;
437 	u32 handle;
438 	int ret;
439 
440 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
441 
442 	drm_gem_object_handle_get(obj);
443 
444 	/*
445 	 * Get the user-visible handle using idr.  Preload and perform
446 	 * allocation under our spinlock.
447 	 */
448 	idr_preload(GFP_KERNEL);
449 	spin_lock(&file_priv->table_lock);
450 
451 	ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT);
452 
453 	spin_unlock(&file_priv->table_lock);
454 	idr_preload_end();
455 
456 	mutex_unlock(&dev->object_name_lock);
457 	if (ret < 0)
458 		goto err_unref;
459 
460 	handle = ret;
461 
462 	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
463 	if (ret)
464 		goto err_remove;
465 
466 	if (obj->funcs->open) {
467 		ret = obj->funcs->open(obj, file_priv);
468 		if (ret)
469 			goto err_revoke;
470 	}
471 
472 	/* mirrors drm_gem_handle_delete to avoid races */
473 	spin_lock(&file_priv->table_lock);
474 	obj = idr_replace(&file_priv->object_idr, obj, handle);
475 	WARN_ON(obj != NULL);
476 	spin_unlock(&file_priv->table_lock);
477 	*handlep = handle;
478 	return 0;
479 
480 err_revoke:
481 	drm_vma_node_revoke(&obj->vma_node, file_priv);
482 err_remove:
483 	spin_lock(&file_priv->table_lock);
484 	idr_remove(&file_priv->object_idr, handle);
485 	spin_unlock(&file_priv->table_lock);
486 err_unref:
487 	drm_gem_object_handle_put_unlocked(obj);
488 	return ret;
489 }
490 
491 /**
492  * drm_gem_handle_create - create a gem handle for an object
493  * @file_priv: drm file-private structure to register the handle for
494  * @obj: object to register
495  * @handlep: pointer to return the created handle to the caller
496  *
497  * Create a handle for this object. This adds a handle reference to the object,
498  * which includes a regular reference count. Callers will likely want to
499  * drop their reference to the object afterwards.
500  *
501  * Since this publishes @obj to userspace it must be fully set up by this point,
502  * drivers must call this last in their buffer object creation callbacks.
503  */
504 int drm_gem_handle_create(struct drm_file *file_priv,
505 			  struct drm_gem_object *obj,
506 			  u32 *handlep)
507 {
508 	mutex_lock(&obj->dev->object_name_lock);
509 
510 	return drm_gem_handle_create_tail(file_priv, obj, handlep);
511 }
512 EXPORT_SYMBOL(drm_gem_handle_create);
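
/*
 * Usage sketch (illustrative): publishing a freshly created object to
 * userspace from a dumb_create callback. Note the reference drop after
 * drm_gem_handle_create(); the handle now holds its own reference.
 * "my_obj_create" is the hypothetical constructor sketched after
 * drm_gem_object_init() above.
 */
static int my_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			  struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *obj;
	int ret;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	obj = my_obj_create(dev, args->size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
	drm_gem_object_put(obj);	/* the handle holds the reference now */

	return ret;
}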
513 
514 
515 /**
516  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
517  * @obj: obj in question
518  *
519  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
520  *
521  * Note that drm_gem_object_release() already calls this function, so drivers
522  * don't have to take care of releasing the mmap offset themselves when freeing
523  * the GEM object.
524  */
525 void
526 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
527 {
528 	struct drm_device *dev = obj->dev;
529 
530 	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
531 }
532 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
533 
534 /**
535  * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
536  * @obj: obj in question
537  * @size: the virtual size
538  *
539  * GEM memory mapping works by handing back to userspace a fake mmap offset
540  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
541  * up the object based on the offset and sets up the various memory mapping
542  * structures.
543  *
544  * This routine allocates and attaches a fake offset for @obj, in cases where
545  * the virtual size differs from the physical size (ie. &drm_gem_object.size).
546  * Otherwise just use drm_gem_create_mmap_offset().
547  *
548  * This function is idempotent and handles an already allocated mmap offset
549  * transparently. Drivers do not need to check for this case.
550  */
551 int
552 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
553 {
554 	struct drm_device *dev = obj->dev;
555 
556 	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
557 				  size / PAGE_SIZE);
558 }
559 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
560 
561 /**
562  * drm_gem_create_mmap_offset - create a fake mmap offset for an object
563  * @obj: obj in question
564  *
565  * GEM memory mapping works by handing back to userspace a fake mmap offset
566  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
567  * up the object based on the offset and sets up the various memory mapping
568  * structures.
569  *
570  * This routine allocates and attaches a fake offset for @obj.
571  *
572  * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
573  * the fake offset again.
574  */
575 int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
576 {
577 	return drm_gem_create_mmap_offset_size(obj, obj->size);
578 }
579 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
580 
581 /*
582  * Move folios to appropriate lru and release the folios, decrementing the
583  * ref count of those folios.
584  */
585 static void drm_gem_check_release_batch(struct folio_batch *fbatch)
586 {
587 	check_move_unevictable_folios(fbatch);
588 	__folio_batch_release(fbatch);
589 	cond_resched();
590 }
591 
592 /**
593  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
594  * from shmem
595  * @obj: obj in question
596  *
597  * This reads the page-array of the shmem-backing storage of the given gem
598  * object. An array of pages is returned. If a page is not allocated or
599  * swapped-out, this will allocate/swap-in the required pages. Note that the
600  * whole object is covered by the page-array and pinned in memory.
601  *
602  * Use drm_gem_put_pages() to release the array and unpin all pages.
603  *
604  * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
605  * If you require other GFP-masks, you have to do those allocations yourself.
606  *
607  * Note that you are not allowed to change gfp-zones during runtime. That is,
608  * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
609  * set during initialization. If you have special zone constraints, set them
610  * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
611  * to keep pages in the required zone during swap-in.
612  *
613  * This function is only valid on objects initialized with
614  * drm_gem_object_init(), but not for those initialized with
615  * drm_gem_private_object_init() only.
616  */
617 struct page **drm_gem_get_pages(struct drm_gem_object *obj)
618 {
619 	struct address_space *mapping;
620 	struct page **pages;
621 	struct folio *folio;
622 	struct folio_batch fbatch;
623 	long i, j, npages;
624 
625 	if (WARN_ON(!obj->filp))
626 		return ERR_PTR(-EINVAL);
627 
628 	/* This is the shared memory object that backs the GEM resource */
629 	mapping = obj->filp->f_mapping;
630 
631 	/* We already BUG_ON() for non-page-aligned sizes in
632 	 * drm_gem_object_init(), so we should never hit this unless
633 	 * driver author is doing something really wrong:
634 	 */
635 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
636 
637 	npages = obj->size >> PAGE_SHIFT;
638 
639 	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
640 	if (pages == NULL)
641 		return ERR_PTR(-ENOMEM);
642 
643 	mapping_set_unevictable(mapping);
644 
645 	i = 0;
646 	while (i < npages) {
647 		long nr;
648 		folio = shmem_read_folio_gfp(mapping, i,
649 				mapping_gfp_mask(mapping));
650 		if (IS_ERR(folio))
651 			goto fail;
652 		nr = min(npages - i, folio_nr_pages(folio));
653 		for (j = 0; j < nr; j++, i++)
654 			pages[i] = folio_file_page(folio, i);
655 
656 		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
657 		 * correct region during swapin. Note that this requires
658 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
659 		 * so shmem can relocate pages during swapin if required.
660 		 */
661 		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
662 				(folio_pfn(folio) >= 0x00100000UL));
663 	}
664 
665 	return pages;
666 
667 fail:
668 	mapping_clear_unevictable(mapping);
669 	folio_batch_init(&fbatch);
670 	j = 0;
671 	while (j < i) {
672 		struct folio *f = page_folio(pages[j]);
673 		if (!folio_batch_add(&fbatch, f))
674 			drm_gem_check_release_batch(&fbatch);
675 		j += folio_nr_pages(f);
676 	}
677 	if (fbatch.nr)
678 		drm_gem_check_release_batch(&fbatch);
679 
680 	kvfree(pages);
681 	return ERR_CAST(folio);
682 }
683 EXPORT_SYMBOL(drm_gem_get_pages);
684 
685 /**
686  * drm_gem_put_pages - helper to free backing pages for a GEM object
687  * @obj: obj in question
688  * @pages: pages to free
689  * @dirty: if true, pages will be marked as dirty
690  * @accessed: if true, the pages will be marked as accessed
691  */
692 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
693 		bool dirty, bool accessed)
694 {
695 	int i, npages;
696 	struct address_space *mapping;
697 	struct folio_batch fbatch;
698 
699 	mapping = file_inode(obj->filp)->i_mapping;
700 	mapping_clear_unevictable(mapping);
701 
702 	/* We already BUG_ON() for non-page-aligned sizes in
703 	 * drm_gem_object_init(), so we should never hit this unless
704 	 * driver author is doing something really wrong:
705 	 */
706 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
707 
708 	npages = obj->size >> PAGE_SHIFT;
709 
710 	folio_batch_init(&fbatch);
711 	for (i = 0; i < npages; i++) {
712 		struct folio *folio;
713 
714 		if (!pages[i])
715 			continue;
716 		folio = page_folio(pages[i]);
717 
718 		if (dirty)
719 			folio_mark_dirty(folio);
720 
721 		if (accessed)
722 			folio_mark_accessed(folio);
723 
724 		/* Undo the reference we took when populating the table */
725 		if (!folio_batch_add(&fbatch, folio))
726 			drm_gem_check_release_batch(&fbatch);
727 		i += folio_nr_pages(folio) - 1;
728 	}
729 	if (folio_batch_count(&fbatch))
730 		drm_gem_check_release_batch(&fbatch);
731 
732 	kvfree(pages);
733 }
734 EXPORT_SYMBOL(drm_gem_put_pages);
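
/*
 * Usage sketch (illustrative; "my_pin_pages" is an assumed driver
 * helper): pairing drm_gem_get_pages() with drm_gem_put_pages() around
 * some driver work on the backing store.
 */
static int my_pin_pages(struct drm_gem_object *obj)
{
	struct page **pages;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... e.g. build an sg_table and map it for the device ... */

	drm_gem_put_pages(obj, pages, true /* dirty */, true /* accessed */);

	return 0;
}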
735 
736 static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
737 			  struct drm_gem_object **objs)
738 {
739 	int i, ret = 0;
740 	struct drm_gem_object *obj;
741 
742 	spin_lock(&filp->table_lock);
743 
744 	for (i = 0; i < count; i++) {
745 		/* Check if we currently have a reference on the object */
746 		obj = idr_find(&filp->object_idr, handle[i]);
747 		if (!obj) {
748 			ret = -ENOENT;
749 			break;
750 		}
751 		drm_gem_object_get(obj);
752 		objs[i] = obj;
753 	}
754 	spin_unlock(&filp->table_lock);
755 
756 	return ret;
757 }
758 
759 /**
760  * drm_gem_objects_lookup - look up GEM objects from an array of handles
761  * @filp: DRM file private data
762  * @bo_handles: user pointer to array of userspace handle
763  * @count: size of handle array
764  * @objs_out: returned pointer to array of drm_gem_object pointers
765  *
766  * Takes an array of userspace handles and returns a newly allocated array of
767  * GEM objects.
768  *
769  * For a single handle lookup, use drm_gem_object_lookup().
770  *
771  * Returns:
772  * @objs_out filled in with GEM object pointers. Returned GEM objects need to be
773  * released with drm_gem_object_put(). -ENOENT is returned on a lookup
774  * failure. 0 is returned on success.
775  *
776  */
777 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
778 			   int count, struct drm_gem_object ***objs_out)
779 {
780 	int ret;
781 	u32 *handles;
782 	struct drm_gem_object **objs;
783 
784 	if (!count)
785 		return 0;
786 
787 	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
788 			     GFP_KERNEL | __GFP_ZERO);
789 	if (!objs)
790 		return -ENOMEM;
791 
792 	*objs_out = objs;
793 
794 	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
795 	if (!handles) {
796 		ret = -ENOMEM;
797 		goto out;
798 	}
799 
800 	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
801 		ret = -EFAULT;
802 		DRM_DEBUG("Failed to copy in GEM handles\n");
803 		goto out;
804 	}
805 
806 	ret = objects_lookup(filp, handles, count, objs);
807 out:
808 	kvfree(handles);
809 	return ret;
810 
811 }
812 EXPORT_SYMBOL(drm_gem_objects_lookup);
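
/*
 * Usage sketch (illustrative): resolving a userspace handle array in an
 * execbuf-style ioctl. "my_exec_lookup", "bo_handles" and "bo_count"
 * are assumptions mirroring typical ioctl arguments.
 */
static int my_exec_lookup(struct drm_file *file_priv, u64 bo_handles,
			  u32 bo_count)
{
	struct drm_gem_object **objs = NULL;
	u32 i;
	int ret;

	ret = drm_gem_objects_lookup(file_priv, u64_to_user_ptr(bo_handles),
				     bo_count, &objs);
	if (ret)
		goto put;

	/* ... queue work against the objects ... */

put:
	/* On lookup failure the array is zero-filled past the failing entry. */
	for (i = 0; objs && i < bo_count; i++) {
		if (objs[i])
			drm_gem_object_put(objs[i]);
	}
	kvfree(objs);

	return ret;
}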
813 
814 /**
815  * drm_gem_object_lookup - look up a GEM object from its handle
816  * @filp: DRM file private data
817  * @handle: userspace handle
818  *
819  * If looking up an array of handles, use drm_gem_objects_lookup().
820  *
821  * Returns:
822  * A reference to the object named by the handle if such exists on @filp, NULL
823  * otherwise.
824  */
825 struct drm_gem_object *
826 drm_gem_object_lookup(struct drm_file *filp, u32 handle)
827 {
828 	struct drm_gem_object *obj = NULL;
829 
830 	objects_lookup(filp, &handle, 1, &obj);
831 	return obj;
832 }
833 EXPORT_SYMBOL(drm_gem_object_lookup);
834 
835 /**
836  * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
837  * shared and/or exclusive fences.
838  * @filep: DRM file private data
839  * @handle: userspace handle
840  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
841  * @timeout: timeout value in jiffies or zero to return immediately
842  *
843  * Returns:
844  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
845  * greater than 0 on success.
846  */
847 long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
848 				    bool wait_all, unsigned long timeout)
849 {
850 	long ret;
851 	struct drm_gem_object *obj;
852 
853 	obj = drm_gem_object_lookup(filep, handle);
854 	if (!obj) {
855 		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
856 		return -EINVAL;
857 	}
858 
859 	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
860 				    true, timeout);
861 	if (ret == 0)
862 		ret = -ETIME;
863 	else if (ret > 0)
864 		ret = 0;
865 
866 	drm_gem_object_put(obj);
867 
868 	return ret;
869 }
870 EXPORT_SYMBOL(drm_gem_dma_resv_wait);
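
/*
 * Usage sketch (illustrative; "struct my_wait_args" is a hypothetical
 * ioctl payload carrying a handle and an absolute timeout in ns): a
 * driver wait ioctl built on this helper, converting the timeout with
 * the DRM core's drm_timeout_abs_to_jiffies().
 */
static int my_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct my_wait_args *args = data;
	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	return drm_gem_dma_resv_wait(file_priv, args->handle,
				     true /* wait_all */, timeout);
}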
871 
872 /**
873  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
874  * @dev: drm_device
875  * @data: ioctl data
876  * @file_priv: drm file-private structure
877  *
878  * Releases the handle to an mm object.
879  */
880 int
881 drm_gem_close_ioctl(struct drm_device *dev, void *data,
882 		    struct drm_file *file_priv)
883 {
884 	struct drm_gem_close *args = data;
885 	int ret;
886 
887 	if (!drm_core_check_feature(dev, DRIVER_GEM))
888 		return -EOPNOTSUPP;
889 
890 	ret = drm_gem_handle_delete(file_priv, args->handle);
891 
892 	return ret;
893 }
894 
895 /**
896  * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
897  * @dev: drm_device
898  * @data: ioctl data
899  * @file_priv: drm file-private structure
900  *
901  * Create a global name for an object, returning the name.
902  *
903  * Note that the name does not hold a reference; when the object
904  * is freed, the name goes away.
905  */
906 int
907 drm_gem_flink_ioctl(struct drm_device *dev, void *data,
908 		    struct drm_file *file_priv)
909 {
910 	struct drm_gem_flink *args = data;
911 	struct drm_gem_object *obj;
912 	int ret;
913 
914 	if (!drm_core_check_feature(dev, DRIVER_GEM))
915 		return -EOPNOTSUPP;
916 
917 	obj = drm_gem_object_lookup(file_priv, args->handle);
918 	if (obj == NULL)
919 		return -ENOENT;
920 
921 	mutex_lock(&dev->object_name_lock);
922 	/* prevent races with concurrent gem_close. */
923 	if (obj->handle_count == 0) {
924 		ret = -ENOENT;
925 		goto err;
926 	}
927 
928 	if (!obj->name) {
929 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
930 		if (ret < 0)
931 			goto err;
932 
933 		obj->name = ret;
934 	}
935 
936 	args->name = (uint64_t) obj->name;
937 	ret = 0;
938 
939 err:
940 	mutex_unlock(&dev->object_name_lock);
941 	drm_gem_object_put(obj);
942 	return ret;
943 }
944 
945 /**
946  * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
947  * @dev: drm_device
948  * @data: ioctl data
949  * @file_priv: drm file-private structure
950  *
951  * Open an object using the global name, returning a handle and the size.
952  *
953  * This handle (of course) holds a reference to the object, so the object
954  * will not go away until the handle is deleted.
955  */
956 int
957 drm_gem_open_ioctl(struct drm_device *dev, void *data,
958 		   struct drm_file *file_priv)
959 {
960 	struct drm_gem_open *args = data;
961 	struct drm_gem_object *obj;
962 	int ret;
963 	u32 handle;
964 
965 	if (!drm_core_check_feature(dev, DRIVER_GEM))
966 		return -EOPNOTSUPP;
967 
968 	mutex_lock(&dev->object_name_lock);
969 	obj = idr_find(&dev->object_name_idr, (int) args->name);
970 	if (obj) {
971 		drm_gem_object_get(obj);
972 	} else {
973 		mutex_unlock(&dev->object_name_lock);
974 		return -ENOENT;
975 	}
976 
977 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
978 	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
979 	if (ret)
980 		goto err;
981 
982 	args->handle = handle;
983 	args->size = obj->size;
984 
985 err:
986 	drm_gem_object_put(obj);
987 	return ret;
988 }
989 
990 /**
991  * drm_gem_open - initializes GEM file-private structures at devnode open time
992  * @dev: drm_device which is being opened by userspace
993  * @file_private: drm file-private structure to set up
994  *
995  * Called at device open time, sets up the structure for handling refcounting
996  * of mm objects.
997  */
998 void
999 drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
1000 {
1001 	idr_init_base(&file_private->object_idr, 1);
1002 	spin_lock_init(&file_private->table_lock);
1003 }
1004 
1005 /**
1006  * drm_gem_release - release file-private GEM resources
1007  * @dev: drm_device which is being closed by userspace
1008  * @file_private: drm file-private structure to clean up
1009  *
1010  * Called at close time when the filp is going away.
1011  *
1012  * Releases any remaining references on objects by this filp.
1013  */
1014 void
1015 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
1016 {
1017 	idr_for_each(&file_private->object_idr,
1018 		     &drm_gem_object_release_handle, file_private);
1019 	idr_destroy(&file_private->object_idr);
1020 }
1021 
1022 /**
1023  * drm_gem_object_release - release GEM buffer object resources
1024  * @obj: GEM buffer object
1025  *
1026  * This releases any structures and resources used by @obj and is the inverse of
1027  * drm_gem_object_init().
1028  */
1029 void
1030 drm_gem_object_release(struct drm_gem_object *obj)
1031 {
1032 	if (obj->filp)
1033 		fput(obj->filp);
1034 
1035 	drm_gem_private_object_fini(obj);
1036 
1037 	drm_gem_free_mmap_offset(obj);
1038 	drm_gem_lru_remove(obj);
1039 }
1040 EXPORT_SYMBOL(drm_gem_object_release);
1041 
1042 /**
1043  * drm_gem_object_free - free a GEM object
1044  * @kref: kref of the object to free
1045  *
1046  * Called after the last reference to the object has been lost.
1047  *
1048  * Frees the object
1049  */
1050 void
1051 drm_gem_object_free(struct kref *kref)
1052 {
1053 	struct drm_gem_object *obj =
1054 		container_of(kref, struct drm_gem_object, refcount);
1055 
1056 	if (WARN_ON(!obj->funcs->free))
1057 		return;
1058 
1059 	obj->funcs->free(obj);
1060 }
1061 EXPORT_SYMBOL(drm_gem_object_free);
1062 
1063 /**
1064  * drm_gem_vm_open - vma->ops->open implementation for GEM
1065  * @vma: VM area structure
1066  *
1067  * This function implements the #vm_operations_struct open() callback for GEM
1068  * drivers. This must be used together with drm_gem_vm_close().
1069  */
1070 void drm_gem_vm_open(struct vm_area_struct *vma)
1071 {
1072 	struct drm_gem_object *obj = vma->vm_private_data;
1073 
1074 	drm_gem_object_get(obj);
1075 }
1076 EXPORT_SYMBOL(drm_gem_vm_open);
1077 
1078 /**
1079  * drm_gem_vm_close - vma->ops->close implementation for GEM
1080  * @vma: VM area structure
1081  *
1082  * This function implements the #vm_operations_struct close() callback for GEM
1083  * drivers. This must be used together with drm_gem_vm_open().
1084  */
1085 void drm_gem_vm_close(struct vm_area_struct *vma)
1086 {
1087 	struct drm_gem_object *obj = vma->vm_private_data;
1088 
1089 	drm_gem_object_put(obj);
1090 }
1091 EXPORT_SYMBOL(drm_gem_vm_close);
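
/*
 * Usage sketch (illustrative; "my_gem_fault" is an assumed driver
 * fault handler): the open/close helpers pair with a driver fault
 * handler in the vm_ops the object exposes via its funcs.
 */
static const struct vm_operations_struct my_gem_vm_ops = {
	.fault = my_gem_fault,
	.open  = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};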
1092 
1093 /**
1094  * drm_gem_mmap_obj - memory map a GEM object
1095  * @obj: the GEM object to map
1096  * @obj_size: the object size to be mapped, in bytes
1097  * @vma: VMA for the area to be mapped
1098  *
1099  * Set up the VMA to prepare mapping of the GEM object using the GEM object's
1100  * vm_ops. Depending on their requirements, GEM objects can either
1101  * provide a fault handler in their vm_ops (in which case any accesses to
1102  * the object will be trapped, to perform migration, GTT binding, surface
1103  * register allocation, or performance monitoring), or mmap the buffer memory
1104  * synchronously after calling drm_gem_mmap_obj.
1105  *
1106  * This function is mainly intended to implement the DMABUF mmap operation, when
1107  * the GEM object is not looked up based on its fake offset. To implement the
1108  * DRM mmap operation, drivers should use the drm_gem_mmap() function.
1109  *
1110  * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
1111  * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
1112  * callers must verify access restrictions before calling this helper.
1113  *
1114  * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
1115  * size, or if no vm_ops are provided.
1116  */
1117 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1118 		     struct vm_area_struct *vma)
1119 {
1120 	int ret;
1121 
1122 	/* Check for valid size. */
1123 	if (obj_size < vma->vm_end - vma->vm_start)
1124 		return -EINVAL;
1125 
1126 	/* Take a ref for this mapping of the object, so that the fault
1127 	 * handler can dereference the mmap offset's pointer to the object.
1128 	 * This reference is cleaned up by the corresponding vm_close
1129 	 * (which should happen whether the vma was created by this call, or
1130 	 * by a vm_open due to mremap or partial unmap or whatever).
1131 	 */
1132 	drm_gem_object_get(obj);
1133 
1134 	vma->vm_private_data = obj;
1135 	vma->vm_ops = obj->funcs->vm_ops;
1136 
1137 	if (obj->funcs->mmap) {
1138 		ret = obj->funcs->mmap(obj, vma);
1139 		if (ret)
1140 			goto err_drm_gem_object_put;
1141 		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
1142 	} else {
1143 		if (!vma->vm_ops) {
1144 			ret = -EINVAL;
1145 			goto err_drm_gem_object_put;
1146 		}
1147 
1148 		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1149 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1150 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1151 	}
1152 
1153 	return 0;
1154 
1155 err_drm_gem_object_put:
1156 	drm_gem_object_put(obj);
1157 	return ret;
1158 }
1159 EXPORT_SYMBOL(drm_gem_mmap_obj);
1160 
1161 /**
1162  * drm_gem_mmap - memory map routine for GEM objects
1163  * @filp: DRM file pointer
1164  * @vma: VMA for the area to be mapped
1165  *
1166  * If a driver supports GEM object mapping, mmap calls on the DRM file
1167  * descriptor will end up here.
1168  *
1169  * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
1170  * contain the fake offset we created when the GTT map ioctl was called on
1171  * the object) and map it with a call to drm_gem_mmap_obj().
1172  *
1173  * If the caller is not granted access to the buffer object, the mmap will fail
1174  * with EACCES. Please see the vma manager for more information.
1175  */
1176 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1177 {
1178 	struct drm_file *priv = filp->private_data;
1179 	struct drm_device *dev = priv->minor->dev;
1180 	struct drm_gem_object *obj = NULL;
1181 	struct drm_vma_offset_node *node;
1182 	int ret;
1183 
1184 	if (drm_dev_is_unplugged(dev))
1185 		return -ENODEV;
1186 
1187 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1188 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1189 						  vma->vm_pgoff,
1190 						  vma_pages(vma));
1191 	if (likely(node)) {
1192 		obj = container_of(node, struct drm_gem_object, vma_node);
1193 		/*
1194 		 * When the object is being freed, after it hits 0-refcnt it
1195 		 * proceeds to tear down the object. In the process it will
1196 		 * attempt to remove the VMA offset and so acquire this
1197 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
1198 		 * that matches our range, we know it is in the process of being
1199 		 * destroyed and will be freed as soon as we release the lock -
1200 		 * so we have to check for the 0-refcnted object and treat it as
1201 		 * invalid.
1202 		 */
1203 		if (!kref_get_unless_zero(&obj->refcount))
1204 			obj = NULL;
1205 	}
1206 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1207 
1208 	if (!obj)
1209 		return -EINVAL;
1210 
1211 	if (!drm_vma_node_is_allowed(node, priv)) {
1212 		drm_gem_object_put(obj);
1213 		return -EACCES;
1214 	}
1215 
1216 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1217 			       vma);
1218 
1219 	drm_gem_object_put(obj);
1220 
1221 	return ret;
1222 }
1223 EXPORT_SYMBOL(drm_gem_mmap);
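
/*
 * Usage sketch (illustrative): wiring drm_gem_mmap() into the driver's
 * file operations by hand. Most drivers instead get an equivalent table
 * from the DEFINE_DRM_GEM_FOPS() macro in <drm/drm_gem.h>.
 */
static const struct file_operations my_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,
};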
1224 
1225 void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
1226 			const struct drm_gem_object *obj)
1227 {
1228 	drm_printf_indent(p, indent, "name=%d\n", obj->name);
1229 	drm_printf_indent(p, indent, "refcount=%u\n",
1230 			  kref_read(&obj->refcount));
1231 	drm_printf_indent(p, indent, "start=%08lx\n",
1232 			  drm_vma_node_start(&obj->vma_node));
1233 	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
1234 	drm_printf_indent(p, indent, "imported=%s\n",
1235 			  str_yes_no(drm_gem_is_imported(obj)));
1236 
1237 	if (obj->funcs->print_info)
1238 		obj->funcs->print_info(p, indent, obj);
1239 }
1240 
1241 int drm_gem_pin_locked(struct drm_gem_object *obj)
1242 {
1243 	if (obj->funcs->pin)
1244 		return obj->funcs->pin(obj);
1245 
1246 	return 0;
1247 }
1248 
1249 void drm_gem_unpin_locked(struct drm_gem_object *obj)
1250 {
1251 	if (obj->funcs->unpin)
1252 		obj->funcs->unpin(obj);
1253 }
1254 
1255 int drm_gem_pin(struct drm_gem_object *obj)
1256 {
1257 	int ret;
1258 
1259 	dma_resv_lock(obj->resv, NULL);
1260 	ret = drm_gem_pin_locked(obj);
1261 	dma_resv_unlock(obj->resv);
1262 
1263 	return ret;
1264 }
1265 
1266 void drm_gem_unpin(struct drm_gem_object *obj)
1267 {
1268 	dma_resv_lock(obj->resv, NULL);
1269 	drm_gem_unpin_locked(obj);
1270 	dma_resv_unlock(obj->resv);
1271 }
1272 
1273 int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
1274 {
1275 	int ret;
1276 
1277 	dma_resv_assert_held(obj->resv);
1278 
1279 	if (!obj->funcs->vmap)
1280 		return -EOPNOTSUPP;
1281 
1282 	ret = obj->funcs->vmap(obj, map);
1283 	if (ret)
1284 		return ret;
1285 	else if (iosys_map_is_null(map))
1286 		return -ENOMEM;
1287 
1288 	return 0;
1289 }
1290 EXPORT_SYMBOL(drm_gem_vmap_locked);
1291 
1292 void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
1293 {
1294 	dma_resv_assert_held(obj->resv);
1295 
1296 	if (iosys_map_is_null(map))
1297 		return;
1298 
1299 	if (obj->funcs->vunmap)
1300 		obj->funcs->vunmap(obj, map);
1301 
1302 	/* Always set the mapping to NULL. Callers may rely on this. */
1303 	iosys_map_clear(map);
1304 }
1305 EXPORT_SYMBOL(drm_gem_vunmap_locked);
1306 
1307 void drm_gem_lock(struct drm_gem_object *obj)
1308 {
1309 	dma_resv_lock(obj->resv, NULL);
1310 }
1311 EXPORT_SYMBOL(drm_gem_lock);
1312 
1313 void drm_gem_unlock(struct drm_gem_object *obj)
1314 {
1315 	dma_resv_unlock(obj->resv);
1316 }
1317 EXPORT_SYMBOL(drm_gem_unlock);
1318 
1319 int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
1320 {
1321 	int ret;
1322 
1323 	dma_resv_lock(obj->resv, NULL);
1324 	ret = drm_gem_vmap_locked(obj, map);
1325 	dma_resv_unlock(obj->resv);
1326 
1327 	return ret;
1328 }
1329 EXPORT_SYMBOL(drm_gem_vmap);
1330 
1331 void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
1332 {
1333 	dma_resv_lock(obj->resv, NULL);
1334 	drm_gem_vunmap_locked(obj, map);
1335 	dma_resv_unlock(obj->resv);
1336 }
1337 EXPORT_SYMBOL(drm_gem_vunmap);
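
/*
 * Usage sketch (illustrative; "my_cpu_fill" is an assumed driver
 * helper): CPU access through the unlocked vmap/vunmap wrappers, which
 * take the object's reservation lock internally.
 */
static int my_cpu_fill(struct drm_gem_object *obj, u8 value)
{
	struct iosys_map map;
	int ret;

	ret = drm_gem_vmap(obj, &map);
	if (ret)
		return ret;

	iosys_map_memset(&map, 0, value, obj->size);

	drm_gem_vunmap(obj, &map);

	return 0;
}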
1338 
1339 /**
1340  * drm_gem_lock_reservations - Sets up the ww context and acquires
1341  * the lock on an array of GEM objects.
1342  *
1343  * Once you've locked your reservations, you'll want to set up space
1344  * for your shared fences (if applicable), submit your job, then
1345  * drm_gem_unlock_reservations().
1346  *
1347  * @objs: drm_gem_objects to lock
1348  * @count: Number of objects in @objs
1349  * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
1350  * part of tracking this set of locked reservations.
1351  */
1352 int
1353 drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
1354 			  struct ww_acquire_ctx *acquire_ctx)
1355 {
1356 	int contended = -1;
1357 	int i, ret;
1358 
1359 	ww_acquire_init(acquire_ctx, &reservation_ww_class);
1360 
1361 retry:
1362 	if (contended != -1) {
1363 		struct drm_gem_object *obj = objs[contended];
1364 
1365 		ret = dma_resv_lock_slow_interruptible(obj->resv,
1366 								 acquire_ctx);
1367 		if (ret) {
1368 			ww_acquire_fini(acquire_ctx);
1369 			return ret;
1370 		}
1371 	}
1372 
1373 	for (i = 0; i < count; i++) {
1374 		if (i == contended)
1375 			continue;
1376 
1377 		ret = dma_resv_lock_interruptible(objs[i]->resv,
1378 							    acquire_ctx);
1379 		if (ret) {
1380 			int j;
1381 
1382 			for (j = 0; j < i; j++)
1383 				dma_resv_unlock(objs[j]->resv);
1384 
1385 			if (contended != -1 && contended >= i)
1386 				dma_resv_unlock(objs[contended]->resv);
1387 
1388 			if (ret == -EDEADLK) {
1389 				contended = i;
1390 				goto retry;
1391 			}
1392 
1393 			ww_acquire_fini(acquire_ctx);
1394 			return ret;
1395 		}
1396 	}
1397 
1398 	ww_acquire_done(acquire_ctx);
1399 
1400 	return 0;
1401 }
1402 EXPORT_SYMBOL(drm_gem_lock_reservations);
1403 
1404 void
1405 drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
1406 			    struct ww_acquire_ctx *acquire_ctx)
1407 {
1408 	int i;
1409 
1410 	for (i = 0; i < count; i++)
1411 		dma_resv_unlock(objs[i]->resv);
1412 
1413 	ww_acquire_fini(acquire_ctx);
1414 }
1415 EXPORT_SYMBOL(drm_gem_unlock_reservations);
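
/*
 * Usage sketch (illustrative; "my_submit" is an assumed driver
 * function): the usual lock/unlock pairing around a job submission.
 */
static int my_submit(struct drm_gem_object **objs, int count)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ret = drm_gem_lock_reservations(objs, count, &ctx);
	if (ret)
		return ret;

	/* ... reserve fence slots, push the job, install fences ... */

	drm_gem_unlock_reservations(objs, count, &ctx);

	return 0;
}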
1416 
1417 /**
1418  * drm_gem_lru_init - initialize a LRU
1419  *
1420  * @lru: The LRU to initialize
1421  * @lock: The lock protecting the LRU
1422  */
1423 void
1424 drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
1425 {
1426 	lru->lock = lock;
1427 	lru->count = 0;
1428 	INIT_LIST_HEAD(&lru->list);
1429 }
1430 EXPORT_SYMBOL(drm_gem_lru_init);
1431 
1432 static void
1433 drm_gem_lru_remove_locked(struct drm_gem_object *obj)
1434 {
1435 	obj->lru->count -= obj->size >> PAGE_SHIFT;
1436 	WARN_ON(obj->lru->count < 0);
1437 	list_del(&obj->lru_node);
1438 	obj->lru = NULL;
1439 }
1440 
1441 /**
1442  * drm_gem_lru_remove - remove object from whatever LRU it is in
1443  *
1444  * If the object is currently in any LRU, remove it.
1445  *
1446  * @obj: The GEM object to remove from current LRU
1447  */
1448 void
1449 drm_gem_lru_remove(struct drm_gem_object *obj)
1450 {
1451 	struct drm_gem_lru *lru = obj->lru;
1452 
1453 	if (!lru)
1454 		return;
1455 
1456 	mutex_lock(lru->lock);
1457 	drm_gem_lru_remove_locked(obj);
1458 	mutex_unlock(lru->lock);
1459 }
1460 EXPORT_SYMBOL(drm_gem_lru_remove);
1461 
1462 /**
1463  * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
1464  *
1465  * Like &drm_gem_lru_move_tail but lru lock must be held
1466  *
1467  * @lru: The LRU to move the object into.
1468  * @obj: The GEM object to move into this LRU
1469  */
1470 void
1471 drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
1472 {
1473 	lockdep_assert_held_once(lru->lock);
1474 
1475 	if (obj->lru)
1476 		drm_gem_lru_remove_locked(obj);
1477 
1478 	lru->count += obj->size >> PAGE_SHIFT;
1479 	list_add_tail(&obj->lru_node, &lru->list);
1480 	obj->lru = lru;
1481 }
1482 EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);
1483 
1484 /**
1485  * drm_gem_lru_move_tail - move the object to the tail of the LRU
1486  *
1487  * If the object is already in this LRU it will be moved to the
1488  * tail.  Otherwise it will be removed from whichever other LRU
1489  * it is in (if any) and moved into this LRU.
1490  *
1491  * @lru: The LRU to move the object into.
1492  * @obj: The GEM object to move into this LRU
1493  */
1494 void
1495 drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
1496 {
1497 	mutex_lock(lru->lock);
1498 	drm_gem_lru_move_tail_locked(lru, obj);
1499 	mutex_unlock(lru->lock);
1500 }
1501 EXPORT_SYMBOL(drm_gem_lru_move_tail);
1502 
1503 /**
1504  * drm_gem_lru_scan - helper to implement shrinker.scan_objects
1505  *
1506  * If the shrink callback succeeds, it is expected that the driver
1507  * move the object out of this LRU.
1508  *
1509  * If the LRU possibly contains active buffers, it is the responsibility
1510  * of the shrink callback to check for this (ie. dma_resv_test_signaled())
1511  * or if necessary block until the buffer becomes idle.
1512  *
1513  * @lru: The LRU to scan
1514  * @nr_to_scan: The number of pages to try to reclaim
1515  * @remaining: The number of pages left to reclaim, should be initialized by caller
1516  * @shrink: Callback to try to shrink/reclaim the object.
1517  */
1518 unsigned long
1519 drm_gem_lru_scan(struct drm_gem_lru *lru,
1520 		 unsigned int nr_to_scan,
1521 		 unsigned long *remaining,
1522 		 bool (*shrink)(struct drm_gem_object *obj))
1523 {
1524 	struct drm_gem_lru still_in_lru;
1525 	struct drm_gem_object *obj;
1526 	unsigned freed = 0;
1527 
1528 	drm_gem_lru_init(&still_in_lru, lru->lock);
1529 
1530 	mutex_lock(lru->lock);
1531 
1532 	while (freed < nr_to_scan) {
1533 		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);
1534 
1535 		if (!obj)
1536 			break;
1537 
1538 		drm_gem_lru_move_tail_locked(&still_in_lru, obj);
1539 
1540 		/*
1541 		 * If it's in the process of being freed, gem_object->free()
1542 		 * may be blocked on lock waiting to remove it.  So just
1543 		 * skip it.
1544 		 */
1545 		if (!kref_get_unless_zero(&obj->refcount))
1546 			continue;
1547 
1548 		/*
1549 		 * Now that we own a reference, we can drop the lock for the
1550 		 * rest of the loop body, to reduce contention with other
1551 		 * code paths that need the LRU lock
1552 		 */
1553 		mutex_unlock(lru->lock);
1554 
1555 		/*
1556 		 * Note that this still needs to be trylock, since we can
1557 		 * hit shrinker in response to trying to get backing pages
1558 		 * for this obj (ie. while it's lock is already held)
1559 		 */
1560 		if (!dma_resv_trylock(obj->resv)) {
1561 			*remaining += obj->size >> PAGE_SHIFT;
1562 			goto tail;
1563 		}
1564 
1565 		if (shrink(obj)) {
1566 			freed += obj->size >> PAGE_SHIFT;
1567 
1568 			/*
1569 			 * If we succeeded in releasing the object's backing
1570 			 * pages, we expect the driver to have moved the object
1571 			 * out of this LRU
1572 			 */
1573 			WARN_ON(obj->lru == &still_in_lru);
1574 			WARN_ON(obj->lru == lru);
1575 		}
1576 
1577 		dma_resv_unlock(obj->resv);
1578 
1579 tail:
1580 		drm_gem_object_put(obj);
1581 		mutex_lock(lru->lock);
1582 	}
1583 
1584 	/*
1585 	 * Move objects we've skipped over out of the temporary still_in_lru
1586 	 * back into this LRU
1587 	 */
1588 	list_for_each_entry (obj, &still_in_lru.list, lru_node)
1589 		obj->lru = lru;
1590 	list_splice_tail(&still_in_lru.list, &lru->list);
1591 	lru->count += still_in_lru.count;
1592 
1593 	mutex_unlock(lru->lock);
1594 
1595 	return freed;
1596 }
1597 EXPORT_SYMBOL(drm_gem_lru_scan);
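
/*
 * Usage sketch (illustrative; assumptions: "struct my_device" holds a
 * &drm_gem_lru named "lru", "my_purge" is a driver callback of type
 * bool (*)(struct drm_gem_object *) that drops the object's backing
 * pages, and the shrinker was created with shrinker_alloc() so
 * ->private_data points at the device): a scan_objects implementation
 * built on drm_gem_lru_scan().
 */
static unsigned long
my_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct my_device *my_dev = shrinker->private_data;
	unsigned long remaining = 0;
	unsigned long freed;

	freed = drm_gem_lru_scan(&my_dev->lru, sc->nr_to_scan,
				 &remaining, my_purge);

	return freed ?: SHRINK_STOP;
}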
1598 
1599 /**
1600  * drm_gem_evict_locked - helper to evict backing pages for a GEM object
1601  * @obj: obj in question
1602  */
1603 int drm_gem_evict_locked(struct drm_gem_object *obj)
1604 {
1605 	dma_resv_assert_held(obj->resv);
1606 
1607 	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
1608 		return -EBUSY;
1609 
1610 	if (obj->funcs->evict)
1611 		return obj->funcs->evict(obj);
1612 
1613 	return 0;
1614 }
1615 EXPORT_SYMBOL(drm_gem_evict_locked);
1616