/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	reservation_object_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
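
/*
 * A minimal usage sketch (the "my_dev"/"use_shmem" names are hypothetical):
 * shmem-backed objects go through drm_gem_object_init(), while objects whose
 * storage the driver manages itself (e.g. VRAM or CMA memory) instead use
 * drm_gem_private_object_init() and provide their own backing pages. Both
 * expect a page-aligned size:
 *
 *	size = PAGE_ALIGN(size);
 *	if (use_shmem)
 *		ret = drm_gem_object_init(my_dev, obj, size);
 *	else
 *		drm_gem_private_object_init(my_dev, obj, size);
 */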

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
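
/*
 * Wiring sketch for drm_gem_dumb_map_offset() and the drm_gem_dumb_destroy()
 * helper that follows; only .dumb_create must be driver-specific (the "my_"
 * names are hypothetical):
 *
 *	static struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create	 = my_dumb_create,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		.dumb_destroy	 = drm_gem_dumb_destroy,
 *	};
 */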

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
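
/*
 * A typical buffer-creation ioctl built on this (sketch; the "my_*" names and
 * args layout are hypothetical). The handle is created last, once the object
 * is fully constructed, and the creation reference is dropped afterwards
 * since the handle now holds its own reference:
 *
 *	static int my_create_ioctl(struct drm_device *dev, void *data,
 *				   struct drm_file *file_priv)
 *	{
 *		struct my_create_args *args = data;
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = my_gem_create(dev, args->size);
 *		if (IS_ERR(obj))
 *			return PTR_ERR(obj);
 *
 *		ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *		drm_gem_object_put_unlocked(obj);
 *		return ret;
 *	}
 */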

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
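
/*
 * Pinning sketch: pages returned by drm_gem_get_pages() stay pinned until
 * handed back with drm_gem_put_pages(), whose dirty/accessed flags tell the
 * core how to treat the pages on release ("bo" is a hypothetical wrapper):
 *
 *	struct page **pages = drm_gem_get_pages(&bo->base);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... build an sg table and map it for the device ...
 *	drm_gem_put_pages(&bo->base, pages, true, false);
 */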

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 *
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			     GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
	*objs_out = objs;

out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
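
/*
 * Sketch of an execbuf-style ioctl using the array lookup (the "args" layout
 * is hypothetical); every object in the returned array carries a reference
 * that the caller must drop when done:
 *
 *	struct drm_gem_object **objs;
 *	int ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		return ret;
 *	... use the objects, then drm_gem_object_put_unlocked() each one ...
 */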

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_reservation_object_wait - Wait on the GEM object's reservation's
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
 * 0 on success.
 */
long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
				    bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
						  true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_reservation_object_wait);
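
/*
 * Sketch of a driver wait ioctl built on this helper (the "args" layout is
 * hypothetical); converting an absolute timeout to jiffies is the caller's
 * job, e.g. with drm_timeout_abs_to_jiffies():
 *
 *	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *	return drm_gem_reservation_object_wait(file_priv, args->handle,
 *					       args->wait_all, timeout);
 */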

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	reservation_object_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs) {
		obj->funcs->free(obj);
	} else if (dev->driver->gem_free_object_unlocked) {
		dev->driver->gem_free_object_unlocked(obj);
	} else if (dev->driver->gem_free_object) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));

		dev->driver->gem_free_object(obj);
	}
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;

	if (dev->driver->gem_free_object) {
		might_lock(&dev->struct_mutex);
		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
				&dev->struct_mutex))
			mutex_unlock(&dev->struct_mutex);
	} else {
		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (obj->funcs && obj->funcs->vm_ops)
		vma->vm_ops = obj->funcs->vm_ops;
	else if (dev->driver->gem_vm_ops)
		vma->vm_ops = dev->driver->gem_vm_ops;
	else
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
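
/*
 * Sketch of the dma-buf use case named above: a &drm_driver.gem_prime_mmap
 * implementation where the object is already known, so no fake-offset lookup
 * or access check is wanted (the "my_" name is hypothetical):
 *
 *	static int my_gem_prime_mmap(struct drm_gem_object *obj,
 *				     struct vm_area_struct *vma)
 *	{
 *		return drm_gem_mmap_obj(obj, obj->size, vma);
 *	}
 */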

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put_unlocked(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
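
/*
 * Drivers normally reach drm_gem_mmap() through their &file_operations.mmap
 * hook; the DEFINE_DRM_GEM_FOPS() helper from <drm/drm_gem.h> wires this up
 * along with the other standard GEM fops ("my_fops" is hypothetical):
 *
 *	DEFINE_DRM_GEM_FOPS(my_fops);
 *
 *	static struct drm_driver my_driver = {
 *		...
 *		.fops = &my_fops,
 *	};
 */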

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs && obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
	else if (obj->dev->driver->gem_print_info)
		obj->dev->driver->gem_print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->unpin)
		obj->funcs->unpin(obj);
	else if (obj->dev->driver->gem_prime_unpin)
		obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs && obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else if (obj->dev->driver->gem_prime_vmap)
		vaddr = obj->dev->driver->gem_prime_vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs && obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
	else if (obj->dev->driver->gem_prime_vunmap)
		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
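
/*
 * The pin/unpin/vmap/vunmap helpers above prefer per-object
 * &drm_gem_object_funcs over the legacy &drm_driver hooks. A sketch of such
 * a table, assigned to obj->funcs before the object is published (all "my_*"
 * callbacks are hypothetical):
 *
 *	static const struct drm_gem_object_funcs my_gem_funcs = {
 *		.free	= my_gem_free,
 *		.pin	= my_gem_pin,
 *		.unpin	= my_gem_unpin,
 *		.vmap	= my_gem_vmap,
 *		.vunmap	= my_gem_vunmap,
 *	};
 */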

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				ww_mutex_unlock(&objs[j]->resv->lock);

			if (contended != -1 && contended >= i)
				ww_mutex_unlock(&objs[contended]->resv->lock);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		ww_mutex_unlock(&objs[i]->resv->lock);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
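
/*
 * Typical submit-path usage of the two helpers above (a sketch; fence
 * installation elided):
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *	... collect dependencies and install the job's fences ...
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */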

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			reservation_object_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = reservation_object_get_fences_rcu(obj->resv, NULL,
						&fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
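
/*
 * Dependency-collection sketch for a submit path (the "job" layout is
 * hypothetical; its xarray must be initialized with
 * xa_init_flags(&job->deps, XA_FLAGS_ALLOC) for xa_alloc() to work):
 *
 *	for (i = 0; i < count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&job->deps, objs[i],
 *						       job->writes[i]);
 *		if (ret)
 *			return ret;
 *	}
 */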
1435