xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c (revision b6c0783ff278671e38fed978fefb732101ac8836)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/ktime.h>
29 #include <linux/module.h>
30 #include <linux/pagemap.h>
31 #include <linux/pci.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/drm_exec.h>
37 #include <drm/drm_gem_ttm_helper.h>
38 #include <drm/ttm/ttm_tt.h>
39 #include <drm/drm_syncobj.h>
40 
41 #include "amdgpu.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_dma_buf.h"
44 #include "amdgpu_hmm.h"
45 #include "amdgpu_xgmi.h"
46 #include "amdgpu_vm.h"
47 
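/**
 * amdgpu_gem_add_input_fence - wait for user supplied syncobj fences
 *
 * @filp: drm file pointer
 * @syncobj_handles_array: user pointer to an array of syncobj handles
 * @num_syncobj_handles: number of handles in the array
 *
 * Copies the handle array from user space, looks up the fence behind each
 * syncobj and waits for it to signal before the caller proceeds with the VA
 * operation.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */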
48 static int
49 amdgpu_gem_add_input_fence(struct drm_file *filp,
50 			   uint64_t syncobj_handles_array,
51 			   uint32_t num_syncobj_handles)
52 {
53 	struct dma_fence *fence;
54 	uint32_t *syncobj_handles;
55 	int ret, i;
56 
57 	if (!num_syncobj_handles)
58 		return 0;
59 
60 	syncobj_handles = memdup_user(u64_to_user_ptr(syncobj_handles_array),
61 				      size_mul(sizeof(uint32_t), num_syncobj_handles));
62 	if (IS_ERR(syncobj_handles))
63 		return PTR_ERR(syncobj_handles);
64 
65 	for (i = 0; i < num_syncobj_handles; i++) {
66 
67 		if (!syncobj_handles[i]) {
68 			ret = -EINVAL;
69 			goto free_memdup;
70 		}
71 
72 		ret = drm_syncobj_find_fence(filp, syncobj_handles[i], 0, 0, &fence);
73 		if (ret)
74 			goto free_memdup;
75 
76 		dma_fence_wait(fence, false);
77 
78 		/* TODO: optimize async handling */
79 		dma_fence_put(fence);
80 	}
81 
82 free_memdup:
83 	kfree(syncobj_handles);
84 	return ret;
85 }
86 
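/**
 * amdgpu_gem_update_timeline_node - prepare the VM timeline syncobj
 *
 * @filp: drm file pointer
 * @syncobj_handle: timeline syncobj handle, 0 if none was supplied
 * @point: timeline point, 0 to simply replace the current fence
 * @syncobj: returned reference to the syncobj
 * @chain: returned fence chain node, only allocated when @point is non-zero
 *
 * Looks up the syncobj and pre-allocates the dma_fence_chain node needed to
 * attach the VM update fence later on; the fence itself is added by the
 * caller once the VA operation has completed.
 *
 * Returns:
 * 0 on success, -ENOENT or -ENOMEM on failure.
 */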
87 static int
88 amdgpu_gem_update_timeline_node(struct drm_file *filp,
89 				uint32_t syncobj_handle,
90 				uint64_t point,
91 				struct drm_syncobj **syncobj,
92 				struct dma_fence_chain **chain)
93 {
94 	if (!syncobj_handle)
95 		return 0;
96 
97 	/* Find the sync object */
98 	*syncobj = drm_syncobj_find(filp, syncobj_handle);
99 	if (!*syncobj)
100 		return -ENOENT;
101 
102 	if (!point)
103 		return 0;
104 
105 	/* Allocate the chain node */
106 	*chain = dma_fence_chain_alloc();
107 	if (!*chain) {
108 		drm_syncobj_put(*syncobj);
109 		return -ENOMEM;
110 	}
111 
112 	return 0;
113 }
114 
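/*
 * TTM fault handler for amdgpu BOs. Reserves the BO, gives the driver a
 * chance to migrate it to a CPU accessible placement and then lets TTM
 * insert the PTEs; if the device has been unplugged a dummy page is mapped
 * instead.
 */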
115 static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
116 {
117 	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
118 	struct drm_device *ddev = bo->base.dev;
119 	vm_fault_t ret;
120 	int idx;
121 
122 	ret = ttm_bo_vm_reserve(bo, vmf);
123 	if (ret)
124 		return ret;
125 
126 	if (drm_dev_enter(ddev, &idx)) {
127 		ret = amdgpu_bo_fault_reserve_notify(bo);
128 		if (ret) {
129 			drm_dev_exit(idx);
130 			goto unlock;
131 		}
132 
133 		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
134 					       TTM_BO_VM_NUM_PREFAULT);
135 
136 		drm_dev_exit(idx);
137 	} else {
138 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
139 	}
140 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
141 		return ret;
142 
143 unlock:
144 	dma_resv_unlock(bo->base.resv);
145 	return ret;
146 }
147 
148 static const struct vm_operations_struct amdgpu_gem_vm_ops = {
149 	.fault = amdgpu_gem_fault,
150 	.open = ttm_bo_vm_open,
151 	.close = ttm_bo_vm_close,
152 	.access = ttm_bo_vm_access
153 };
154 
155 static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
156 {
157 	struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
158 
159 	amdgpu_hmm_unregister(aobj);
160 	ttm_bo_fini(&aobj->tbo);
161 }
162 
163 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
164 			     int alignment, u32 initial_domain,
165 			     u64 flags, enum ttm_bo_type type,
166 			     struct dma_resv *resv,
167 			     struct drm_gem_object **obj, int8_t xcp_id_plus1)
168 {
169 	struct amdgpu_bo *bo;
170 	struct amdgpu_bo_user *ubo;
171 	struct amdgpu_bo_param bp;
172 	int r;
173 
174 	memset(&bp, 0, sizeof(bp));
175 	*obj = NULL;
176 	flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
177 
178 	bp.size = size;
179 	bp.byte_align = alignment;
180 	bp.type = type;
181 	bp.resv = resv;
182 	bp.preferred_domain = initial_domain;
183 	bp.flags = flags;
184 	bp.domain = initial_domain;
185 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
186 	bp.xcp_id_plus1 = xcp_id_plus1;
187 
188 	r = amdgpu_bo_create_user(adev, &bp, &ubo);
189 	if (r)
190 		return r;
191 
192 	bo = &ubo->bo;
193 	*obj = &bo->tbo.base;
194 
195 	return 0;
196 }
197 
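/*
 * Forcefully release all GEM objects still held by user space clients,
 * used when the device is torn down while handles are still open.
 */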
198 void amdgpu_gem_force_release(struct amdgpu_device *adev)
199 {
200 	struct drm_device *ddev = adev_to_drm(adev);
201 	struct drm_file *file;
202 
203 	mutex_lock(&ddev->filelist_mutex);
204 
205 	list_for_each_entry(file, &ddev->filelist, lhead) {
206 		struct drm_gem_object *gobj;
207 		int handle;
208 
209 		WARN_ONCE(1, "Still active user space clients!\n");
210 		spin_lock(&file->table_lock);
211 		idr_for_each_entry(&file->object_idr, gobj, handle) {
212 			WARN_ONCE(1, "And also active allocations!\n");
213 			drm_gem_object_put(gobj);
214 		}
215 		idr_destroy(&file->object_idr);
216 		spin_unlock(&file->table_lock);
217 	}
218 
219 	mutex_unlock(&ddev->filelist_mutex);
220 }
221 
222 /*
223  * Called from drm_gem_handle_create(), which appears in both the new and
224  * open ioctl cases.
225  */
226 static int amdgpu_gem_object_open(struct drm_gem_object *obj,
227 				  struct drm_file *file_priv)
228 {
229 	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
230 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
231 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
232 	struct amdgpu_vm *vm = &fpriv->vm;
233 	struct amdgpu_bo_va *bo_va;
234 	struct mm_struct *mm;
235 	struct drm_exec exec;
236 	int r;
237 
238 	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
239 	if (mm && mm != current->mm)
240 		return -EPERM;
241 
242 	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
243 	    !amdgpu_vm_is_bo_always_valid(vm, abo))
244 		return -EPERM;
245 
246 	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
247 	drm_exec_until_all_locked(&exec) {
248 		r = drm_exec_prepare_obj(&exec, &abo->tbo.base, 1);
249 		drm_exec_retry_on_contention(&exec);
250 		if (unlikely(r))
251 			goto out_unlock;
252 
253 		r = amdgpu_vm_lock_pd(vm, &exec, 0);
254 		drm_exec_retry_on_contention(&exec);
255 		if (unlikely(r))
256 			goto out_unlock;
257 	}
258 
259 	amdgpu_vm_bo_update_shared(abo);
260 	bo_va = amdgpu_vm_bo_find(vm, abo);
261 	if (!bo_va) {
262 		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
263 		r = amdgpu_evf_mgr_attach_fence(&fpriv->evf_mgr, abo);
264 		if (r)
265 			goto out_unlock;
266 	} else {
267 		++bo_va->ref_count;
268 	}
269 
270 	drm_exec_fini(&exec);
271 
272 	/* Validate and add eviction fence to DMABuf imports with dynamic
273 	 * attachment in compute VMs. Re-validation will be done by
274 	 * amdgpu_vm_validate. Fences are on the reservation shared with the
275 	 * export, which is currently required to be validated and fenced
276 	 * already by amdgpu_amdkfd_gpuvm_restore_process_bos.
277 	 *
278 	 * Nested locking below for the case that a GEM object is opened in
279 	 * kfd_mem_export_dmabuf. Since the lock below is only taken for imports,
280 	 * but not for export, this is a different lock class that cannot lead to
281 	 * circular lock dependencies.
282 	 */
283 	if (!vm->is_compute_context || !vm->process_info)
284 		return 0;
285 	if (!drm_gem_is_imported(obj) ||
286 	    !dma_buf_is_dynamic(obj->import_attach->dmabuf))
287 		return 0;
288 	mutex_lock_nested(&vm->process_info->lock, 1);
289 	if (!WARN_ON(!vm->process_info->eviction_fence)) {
290 		r = amdgpu_amdkfd_bo_validate_and_fence(abo, AMDGPU_GEM_DOMAIN_GTT,
291 							&vm->process_info->eviction_fence->base);
292 		if (r) {
293 			struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
294 
295 			dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
296 			if (ti) {
297 				dev_warn(adev->dev, "pid %d\n", ti->task.pid);
298 				amdgpu_vm_put_task_info(ti);
299 			}
300 		}
301 	}
302 	mutex_unlock(&vm->process_info->lock);
303 	return r;
304 
305 out_unlock:
306 	drm_exec_fini(&exec);
307 	return r;
308 }
309 
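/*
 * Called on GEM handle close. Drops the bo_va reference for this VM and,
 * once the last reference is gone, removes the bo_va, clears the freed page
 * table entries and fences that work against the BO.
 */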
310 static void amdgpu_gem_object_close(struct drm_gem_object *obj,
311 				    struct drm_file *file_priv)
312 {
313 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
314 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
315 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
316 	struct amdgpu_vm *vm = &fpriv->vm;
317 
318 	struct dma_fence *fence = NULL;
319 	struct amdgpu_bo_va *bo_va;
320 	struct drm_exec exec;
321 	long r;
322 
323 	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
324 	drm_exec_until_all_locked(&exec) {
325 		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
326 		drm_exec_retry_on_contention(&exec);
327 		if (unlikely(r))
328 			goto out_unlock;
329 
330 		r = amdgpu_vm_lock_pd(vm, &exec, 0);
331 		drm_exec_retry_on_contention(&exec);
332 		if (unlikely(r))
333 			goto out_unlock;
334 	}
335 
336 	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
337 		amdgpu_evf_mgr_detach_fence(&fpriv->evf_mgr, bo);
338 
339 	bo_va = amdgpu_vm_bo_find(vm, bo);
340 	if (!bo_va || --bo_va->ref_count)
341 		goto out_unlock;
342 
343 	amdgpu_vm_bo_del(adev, bo_va);
344 	amdgpu_vm_bo_update_shared(bo);
345 	if (!amdgpu_vm_ready(vm))
346 		goto out_unlock;
347 
348 	r = amdgpu_vm_clear_freed(adev, vm, &fence);
349 	if (unlikely(r < 0) && !drm_dev_is_unplugged(adev_to_drm(adev)))
350 		dev_err(adev->dev,
351 			"failed to clear page tables on GEM object close (%ld)\n", r);
352 	if (r || !fence)
353 		goto out_unlock;
354 
355 	amdgpu_bo_fence(bo, fence, true);
356 	dma_fence_put(fence);
357 
358 out_unlock:
359 	if (r && !drm_dev_is_unplugged(adev_to_drm(adev)))
360 		dev_err(adev->dev, "leaking bo va (%ld)\n", r);
361 	drm_exec_fini(&exec);
362 }
363 
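/*
 * mmap handler for amdgpu GEM objects. Userptr BOs and BOs created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS cannot be mapped; everything else is
 * forwarded to the common GEM TTM mmap code.
 */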
364 static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
365 {
366 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
367 
368 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
369 		return -EPERM;
370 	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
371 		return -EPERM;
372 
373 	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
374 	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
375 	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
376 	 * becoming writable and makes is_cow_mapping(vm_flags) false.
377 	 */
378 	if (is_cow_mapping(vma->vm_flags) &&
379 	    !(vma->vm_flags & VM_ACCESS_FLAGS))
380 		vm_flags_clear(vma, VM_MAYWRITE);
381 
382 	return drm_gem_ttm_mmap(obj, vma);
383 }
384 
385 const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
386 	.free = amdgpu_gem_object_free,
387 	.open = amdgpu_gem_object_open,
388 	.close = amdgpu_gem_object_close,
389 	.export = amdgpu_gem_prime_export,
390 	.vmap = drm_gem_ttm_vmap,
391 	.vunmap = drm_gem_ttm_vunmap,
392 	.mmap = amdgpu_gem_object_mmap,
393 	.vm_ops = &amdgpu_gem_vm_ops,
394 };
395 
396 /*
397  * GEM ioctls.
398  */
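/**
 * amdgpu_gem_create_ioctl - create a GEM buffer object
 *
 * @dev: drm device pointer
 * @data: union drm_amdgpu_gem_create
 * @filp: drm file pointer
 *
 * Validates the requested flags and domains, allocates the BO (falling back
 * from VRAM to GTT when necessary) and returns a GEM handle to user space.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */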
399 int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
400 			    struct drm_file *filp)
401 {
402 	struct amdgpu_device *adev = drm_to_adev(dev);
403 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
404 	struct amdgpu_vm *vm = &fpriv->vm;
405 	union drm_amdgpu_gem_create *args = data;
406 	uint64_t flags = args->in.domain_flags;
407 	uint64_t size = args->in.bo_size;
408 	struct dma_resv *resv = NULL;
409 	struct drm_gem_object *gobj;
410 	uint32_t handle, initial_domain;
411 	int r;
412 
413 	/* reject invalid gem flags */
414 	if (flags & ~AMDGPU_GEM_CREATE_SETTABLE_MASK)
415 		return -EINVAL;
416 
417 	/* reject invalid gem domains */
418 	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
419 		return -EINVAL;
420 
421 	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
422 		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
423 		return -EINVAL;
424 	}
425 
426 	/* always clear VRAM */
427 	flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
428 
429 	/* create a GEM object to contain this buffer */
430 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
431 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
432 		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
433 			/* if a GDS BO is created from user space, it must be
434 			 * passed to the bo list
435 			 */
436 			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
437 			return -EINVAL;
438 		}
439 		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
440 	}
441 
442 	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
443 		r = amdgpu_bo_reserve(vm->root.bo, false);
444 		if (r)
445 			return r;
446 
447 		resv = vm->root.bo->tbo.base.resv;
448 	}
449 
450 	initial_domain = (u32)(0xffffffff & args->in.domains);
451 retry:
452 	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
453 				     initial_domain,
454 				     flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1);
455 	if (r && r != -ERESTARTSYS) {
456 		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
457 			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
458 			goto retry;
459 		}
460 
461 		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
462 			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
463 			goto retry;
464 		}
465 		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
466 				size, initial_domain, args->in.alignment, r);
467 	}
468 
469 	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
470 		if (!r) {
471 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
472 
473 			abo->parent = amdgpu_bo_ref(vm->root.bo);
474 		}
475 		amdgpu_bo_unreserve(vm->root.bo);
476 	}
477 	if (r)
478 		return r;
479 
480 	r = drm_gem_handle_create(filp, gobj, &handle);
481 	/* drop reference from allocate - handle holds it now */
482 	drm_gem_object_put(gobj);
483 	if (r)
484 		return r;
485 
486 	memset(args, 0, sizeof(*args));
487 	args->out.handle = handle;
488 	return 0;
489 }
490 
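/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: drm device pointer
 * @data: struct drm_amdgpu_gem_userptr
 * @filp: drm file pointer
 *
 * Creates a GTT BO backed by the user address range given in args->addr,
 * registers an HMM notifier for it and optionally validates the pages right
 * away when AMDGPU_GEM_USERPTR_VALIDATE is set.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */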
491 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
492 			     struct drm_file *filp)
493 {
494 	struct ttm_operation_ctx ctx = { true, false };
495 	struct amdgpu_device *adev = drm_to_adev(dev);
496 	struct drm_amdgpu_gem_userptr *args = data;
497 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
498 	struct drm_gem_object *gobj;
499 	struct amdgpu_hmm_range *range;
500 	struct amdgpu_bo *bo;
501 	uint32_t handle;
502 	int r;
503 
504 	args->addr = untagged_addr(args->addr);
505 
506 	if (offset_in_page(args->addr | args->size))
507 		return -EINVAL;
508 
509 	/* reject unknown flag values */
510 	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
511 	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
512 	    AMDGPU_GEM_USERPTR_REGISTER))
513 		return -EINVAL;
514 
515 	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
516 	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
517 
518 		/* if we want to write to it we must install an MMU notifier */
519 		return -EACCES;
520 	}
521 
522 	/* create a GEM object to contain this buffer */
523 	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
524 				     0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
525 	if (r)
526 		return r;
527 
528 	bo = gem_to_amdgpu_bo(gobj);
529 	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
530 	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
531 	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
532 	if (r)
533 		goto release_object;
534 
535 	r = amdgpu_hmm_register(bo, args->addr);
536 	if (r)
537 		goto release_object;
538 
539 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
540 		range = amdgpu_hmm_range_alloc(NULL);
541 		if (unlikely(!range)) {
542 			r = -ENOMEM;
			goto release_object;
		}
543 		r = amdgpu_ttm_tt_get_user_pages(bo, range);
544 		if (r) {
545 			amdgpu_hmm_range_free(range);
546 			goto release_object;
547 		}
548 		r = amdgpu_bo_reserve(bo, true);
549 		if (r)
550 			goto user_pages_done;
551 
552 		amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
553 
554 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
555 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
556 		amdgpu_bo_unreserve(bo);
557 		if (r)
558 			goto user_pages_done;
559 	}
560 
561 	r = drm_gem_handle_create(filp, gobj, &handle);
562 	if (r)
563 		goto user_pages_done;
564 
565 	args->handle = handle;
566 
567 user_pages_done:
568 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
569 		amdgpu_hmm_range_free(range);
570 release_object:
571 	drm_gem_object_put(gobj);
572 
573 	return r;
574 }
575 
576 int amdgpu_mode_dumb_mmap(struct drm_file *filp,
577 			  struct drm_device *dev,
578 			  uint32_t handle, uint64_t *offset_p)
579 {
580 	struct drm_gem_object *gobj;
581 	struct amdgpu_bo *robj;
582 
583 	gobj = drm_gem_object_lookup(filp, handle);
584 	if (!gobj)
585 		return -ENOENT;
586 
587 	robj = gem_to_amdgpu_bo(gobj);
588 	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
589 	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
590 		drm_gem_object_put(gobj);
591 		return -EPERM;
592 	}
593 	*offset_p = amdgpu_bo_mmap_offset(robj);
594 	drm_gem_object_put(gobj);
595 	return 0;
596 }
597 
598 int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
599 			  struct drm_file *filp)
600 {
601 	union drm_amdgpu_gem_mmap *args = data;
602 	uint32_t handle = args->in.handle;
603 
604 	memset(args, 0, sizeof(*args));
605 	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
606 }
607 
608 /**
609  * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
610  *
611  * @timeout_ns: timeout in ns
612  *
613  * Calculate the timeout in jiffies from an absolute timeout in ns.
614  */
615 unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
616 {
617 	unsigned long timeout_jiffies;
618 	ktime_t timeout;
619 
620 	/* clamp timeout if it's too large */
621 	if (((int64_t)timeout_ns) < 0)
622 		return MAX_SCHEDULE_TIMEOUT;
623 
624 	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
625 	if (ktime_to_ns(timeout) < 0)
626 		return 0;
627 
628 	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
629 	/* clamp timeout to avoid unsigned -> signed overflow */
630 	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
631 		return MAX_SCHEDULE_TIMEOUT - 1;
632 
633 	return timeout_jiffies;
634 }
635 
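/**
 * amdgpu_gem_wait_idle_ioctl - wait for a GEM object to become idle
 *
 * @dev: drm device pointer
 * @data: union drm_amdgpu_gem_wait_idle
 * @filp: drm file pointer
 *
 * Waits on the BO's reservation object for up to the user supplied timeout
 * and reports back whether the BO is still busy.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */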
636 int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
637 			      struct drm_file *filp)
638 {
639 	union drm_amdgpu_gem_wait_idle *args = data;
640 	struct drm_gem_object *gobj;
641 	struct amdgpu_bo *robj;
642 	uint32_t handle = args->in.handle;
643 	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
644 	int r = 0;
645 	long ret;
646 
647 	gobj = drm_gem_object_lookup(filp, handle);
648 	if (!gobj)
649 		return -ENOENT;
650 
651 	robj = gem_to_amdgpu_bo(gobj);
652 	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
653 				    true, timeout);
654 
655 	/* ret == 0 means not signaled,
656 	 * ret > 0 means signaled
657 	 * ret < 0 means interrupted before timeout
658 	 */
659 	if (ret >= 0) {
660 		memset(args, 0, sizeof(*args));
661 		args->out.status = (ret == 0);
662 	} else
663 		r = ret;
664 
665 	drm_gem_object_put(gobj);
666 	return r;
667 }
668 
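/**
 * amdgpu_gem_metadata_ioctl - get or set buffer metadata and tiling flags
 *
 * @dev: drm device pointer
 * @data: struct drm_amdgpu_gem_metadata
 * @filp: drm file pointer
 *
 * Depending on args->op this either reads back or updates the opaque
 * metadata blob and the tiling flags attached to a BO.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */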
669 int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
670 				struct drm_file *filp)
671 {
672 	struct drm_amdgpu_gem_metadata *args = data;
673 	struct drm_gem_object *gobj;
674 	struct amdgpu_bo *robj;
675 	int r = -1;
676 
677 	DRM_DEBUG("%d\n", args->handle);
678 	gobj = drm_gem_object_lookup(filp, args->handle);
679 	if (gobj == NULL)
680 		return -ENOENT;
681 	robj = gem_to_amdgpu_bo(gobj);
682 
683 	r = amdgpu_bo_reserve(robj, false);
684 	if (unlikely(r != 0))
685 		goto out;
686 
687 	/* Reject MMIO_REMAP BOs at IOCTL level: metadata/tiling does not apply. */
688 	if (robj->tbo.resource &&
689 	    robj->tbo.resource->mem_type == AMDGPU_PL_MMIO_REMAP) {
690 		DRM_WARN("metadata ioctl on MMIO_REMAP BO (handle %d)\n",
691 			 args->handle);
692 		r = -EINVAL;
693 		goto unreserve;
694 	}
695 
696 	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
697 		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
698 		r = amdgpu_bo_get_metadata(robj, args->data.data,
699 					   sizeof(args->data.data),
700 					   &args->data.data_size_bytes,
701 					   &args->data.flags);
702 	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
703 		if (args->data.data_size_bytes > sizeof(args->data.data)) {
704 			r = -EINVAL;
705 			goto unreserve;
706 		}
707 		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
708 		if (!r)
709 			r = amdgpu_bo_set_metadata(robj, args->data.data,
710 						   args->data.data_size_bytes,
711 						   args->data.flags);
712 	}
713 
714 unreserve:
715 	amdgpu_bo_unreserve(robj);
716 out:
717 	drm_gem_object_put(gobj);
718 	return r;
719 }
720 
721 /**
722  * amdgpu_gem_va_update_vm - update the bo_va in its VM
723  *
724  * @adev: amdgpu_device pointer
725  * @vm: vm to update
726  * @bo_va: bo_va to update
727  * @operation: map, unmap or clear
728  *
729  * Update the bo_va directly after setting its address. Errors are not
730  * vital here, so they are not reported back to userspace.
731  *
732  * Returns the resulting fence if freed BO(s) got cleared from the PT,
733  * otherwise a stub fence in case of error.
734  */
735 static struct dma_fence *
736 amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
737 			struct amdgpu_vm *vm,
738 			struct amdgpu_bo_va *bo_va,
739 			uint32_t operation)
740 {
741 	struct dma_fence *fence;
742 	int r = 0;
743 
744 	/* Always start from the VM's existing last update fence. */
745 	fence = dma_fence_get(vm->last_update);
746 
747 	if (!amdgpu_vm_ready(vm))
748 		return fence;
749 
750 	/*
751 	 * First clean up any freed mappings in the VM.
752 	 *
753 	 * amdgpu_vm_clear_freed() may replace @fence with a new fence if it
754 	 * schedules GPU work. If nothing needs clearing, @fence can remain as
755 	 * the original vm->last_update.
756 	 */
757 	r = amdgpu_vm_clear_freed(adev, vm, &fence);
758 	if (r)
759 		goto error;
760 
761 	/* For MAP/REPLACE we also need to update the BO mappings. */
762 	if (operation == AMDGPU_VA_OP_MAP ||
763 	    operation == AMDGPU_VA_OP_REPLACE) {
764 		r = amdgpu_vm_bo_update(adev, bo_va, false);
765 		if (r)
766 			goto error;
767 	}
768 
769 	/* Always update PDEs after we touched the mappings. */
770 	r = amdgpu_vm_update_pdes(adev, vm, false);
771 	if (r)
772 		goto error;
773 
774 	/*
775 	 * Decide which fence best represents the last update:
776 	 *
777 	 * MAP/REPLACE:
778 	 *   - For always-valid mappings, use vm->last_update.
779 	 *   - Otherwise, export bo_va->last_pt_update.
780 	 *
781 	 * UNMAP/CLEAR:
782 	 *   Keep the fence returned by amdgpu_vm_clear_freed(). If no work was
783 	 *   needed, it can remain the original vm->last_update.
784 	 *
785 	 * The VM and BO update fences are always initialized to a valid value:
786 	 * vm->last_update and bo_va->last_pt_update always start as valid fences
787 	 * and are never expected to be NULL.
788 	 */
789 	switch (operation) {
790 	case AMDGPU_VA_OP_MAP:
791 	case AMDGPU_VA_OP_REPLACE:
792 		/*
793 		 * For MAP/REPLACE, return the page table update fence for the
794 		 * mapping we just modified. bo_va is expected to be valid here.
795 		 */
796 		dma_fence_put(fence);
797 
798 		if (amdgpu_vm_is_bo_always_valid(vm, bo_va->base.bo))
799 			fence = dma_fence_get(vm->last_update);
800 		else
801 			fence = dma_fence_get(bo_va->last_pt_update);
802 		break;
803 	case AMDGPU_VA_OP_UNMAP:
804 	case AMDGPU_VA_OP_CLEAR:
805 	default:
806 		/* keep @fence as returned by amdgpu_vm_clear_freed() */
807 		break;
808 	}
809 
810 error:
811 	if (r && r != -ERESTARTSYS)
812 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
813 
814 	return fence;
815 }
816 
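/**
 * amdgpu_gem_va_ioctl - map or unmap a BO in the process GPU VM
 *
 * @dev: drm device pointer
 * @data: struct drm_amdgpu_gem_va
 * @filp: drm file pointer
 *
 * Validates the requested virtual address range and flags, performs the MAP,
 * UNMAP, CLEAR or REPLACE operation on the per-process VM and, unless
 * delayed updates were requested, updates the page tables immediately. The
 * resulting update fence can be exported through a timeline syncobj.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */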
817 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
818 			  struct drm_file *filp)
819 {
820 	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
821 		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
822 		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
823 		AMDGPU_VM_PAGE_NOALLOC;
824 	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
825 		AMDGPU_VM_PAGE_PRT;
826 
827 	struct drm_amdgpu_gem_va *args = data;
828 	struct drm_gem_object *gobj;
829 	struct amdgpu_device *adev = drm_to_adev(dev);
830 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
831 	struct amdgpu_bo *abo;
832 	struct amdgpu_bo_va *bo_va;
833 	struct drm_syncobj *timeline_syncobj = NULL;
834 	struct dma_fence_chain *timeline_chain = NULL;
835 	struct dma_fence *fence;
836 	struct drm_exec exec;
837 	uint64_t vm_size;
838 	int r = 0;
839 
840 	/* Validate virtual address range against reserved regions. */
841 	if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) {
842 		dev_dbg(dev->dev,
843 			"va_address 0x%llx is in reserved area 0x%llx\n",
844 			args->va_address, AMDGPU_VA_RESERVED_BOTTOM);
845 		return -EINVAL;
846 	}
847 
848 	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
849 	    args->va_address < AMDGPU_GMC_HOLE_END) {
850 		dev_dbg(dev->dev,
851 			"va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
852 			args->va_address, AMDGPU_GMC_HOLE_START,
853 			AMDGPU_GMC_HOLE_END);
854 		return -EINVAL;
855 	}
856 
857 	args->va_address &= AMDGPU_GMC_HOLE_MASK;
858 
859 	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
860 	vm_size -= AMDGPU_VA_RESERVED_TOP;
861 	if (args->va_address + args->map_size > vm_size) {
862 		dev_dbg(dev->dev,
863 			"va_address 0x%llx is in top reserved area 0x%llx\n",
864 			args->va_address + args->map_size, vm_size);
865 		return -EINVAL;
866 	}
867 
868 	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
869 		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
870 			args->flags);
871 		return -EINVAL;
872 	}
873 
874 	/* Validate operation type. */
875 	switch (args->operation) {
876 	case AMDGPU_VA_OP_MAP:
877 	case AMDGPU_VA_OP_UNMAP:
878 	case AMDGPU_VA_OP_CLEAR:
879 	case AMDGPU_VA_OP_REPLACE:
880 		break;
881 	default:
882 		dev_dbg(dev->dev, "unsupported operation %d\n",
883 			args->operation);
884 		return -EINVAL;
885 	}
886 
887 	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
888 	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
889 		gobj = drm_gem_object_lookup(filp, args->handle);
890 		if (gobj == NULL)
891 			return -ENOENT;
892 		abo = gem_to_amdgpu_bo(gobj);
893 	} else {
894 		gobj = NULL;
895 		abo = NULL;
896 	}
897 
898 	/* Add input syncobj fences (if any) for synchronization. */
899 	r = amdgpu_gem_add_input_fence(filp,
900 				       args->input_fence_syncobj_handles,
901 				       args->num_syncobj_handles);
902 	if (r)
903 		goto error_put_gobj;
904 
905 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
906 		      DRM_EXEC_IGNORE_DUPLICATES, 0);
907 	drm_exec_until_all_locked(&exec) {
908 		if (gobj) {
909 			r = drm_exec_lock_obj(&exec, gobj);
910 			drm_exec_retry_on_contention(&exec);
911 			if (unlikely(r))
912 				goto error;
913 		}
914 
915 		r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
916 		drm_exec_retry_on_contention(&exec);
917 		if (unlikely(r))
918 			goto error;
919 	}
920 
921 	/* Resolve the BO-VA mapping for this VM/BO combination. */
922 	if (abo) {
923 		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
924 		if (!bo_va) {
925 			r = -ENOENT;
926 			goto error;
927 		}
928 	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
929 		bo_va = fpriv->prt_va;
930 	} else {
931 		bo_va = NULL;
932 	}
933 
934 	/*
935 	 * Prepare the timeline syncobj node if the user requested a VM
936 	 * timeline update. This only allocates/looks up the syncobj and
937 	 * chain node; the actual fence is attached later.
938 	 */
939 	r = amdgpu_gem_update_timeline_node(filp,
940 					    args->vm_timeline_syncobj_out,
941 					    args->vm_timeline_point,
942 					    &timeline_syncobj,
943 					    &timeline_chain);
944 	if (r)
945 		goto error;
946 
947 	switch (args->operation) {
948 	case AMDGPU_VA_OP_MAP:
949 		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
950 				     args->offset_in_bo, args->map_size,
951 				     args->flags);
952 		break;
953 	case AMDGPU_VA_OP_UNMAP:
954 		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
955 		break;
956 
957 	case AMDGPU_VA_OP_CLEAR:
958 		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
959 						args->va_address,
960 						args->map_size);
961 		break;
962 	case AMDGPU_VA_OP_REPLACE:
963 		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
964 					     args->offset_in_bo, args->map_size,
965 					     args->flags);
966 		break;
967 	default:
968 		break;
969 	}
970 
971 	/*
972 	 * Once the VA operation is done, update the VM and obtain the fence
973 	 * that represents the last relevant update for this mapping. This
974 	 * fence can then be exported to the user-visible VM timeline.
975 	 */
976 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
977 		fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
978 						args->operation);
979 
980 		if (timeline_syncobj && fence) {
981 			if (!args->vm_timeline_point) {
982 				/* Replace the existing fence when no point is given. */
983 				drm_syncobj_replace_fence(timeline_syncobj,
984 							  fence);
985 			} else {
986 				/* Attach the last-update fence at a specific point. */
987 				drm_syncobj_add_point(timeline_syncobj,
988 						      timeline_chain,
989 						      fence,
990 						      args->vm_timeline_point);
991 			}
992 		}
993 		dma_fence_put(fence);
994 
995 	}
996 
997 error:
998 	drm_exec_fini(&exec);
999 error_put_gobj:
1000 	drm_gem_object_put(gobj);
1001 	return r;
1002 }
1003 
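/**
 * amdgpu_gem_op_ioctl - query or change properties of a GEM object
 *
 * @dev: drm device pointer
 * @data: struct drm_amdgpu_gem_op
 * @filp: drm file pointer
 *
 * Supports reading back the original creation arguments, changing the
 * preferred placement domains and querying the VA mappings of a BO in the
 * caller's VM.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */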
1004 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
1005 			struct drm_file *filp)
1006 {
1007 	struct drm_amdgpu_gem_op *args = data;
1008 	struct drm_gem_object *gobj;
1009 	struct amdgpu_vm_bo_base *base;
1010 	struct amdgpu_bo *robj;
1011 	struct drm_exec exec;
1012 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
1013 	int r;
1014 
1015 	if (args->padding)
1016 		return -EINVAL;
1017 
1018 	gobj = drm_gem_object_lookup(filp, args->handle);
1019 	if (!gobj)
1020 		return -ENOENT;
1021 
1022 	robj = gem_to_amdgpu_bo(gobj);
1023 
1024 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
1025 			  DRM_EXEC_IGNORE_DUPLICATES, 0);
1026 	drm_exec_until_all_locked(&exec) {
1027 		r = drm_exec_lock_obj(&exec, gobj);
1028 		drm_exec_retry_on_contention(&exec);
1029 		if (r)
1030 			goto out_exec;
1031 
1032 		if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
1033 			r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
1034 			drm_exec_retry_on_contention(&exec);
1035 			if (r)
1036 				goto out_exec;
1037 		}
1038 	}
1039 
1040 	switch (args->op) {
1041 	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
1042 		struct drm_amdgpu_gem_create_in info;
1043 		void __user *out = u64_to_user_ptr(args->value);
1044 
1045 		info.bo_size = robj->tbo.base.size;
1046 		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
1047 		info.domains = robj->preferred_domains;
1048 		info.domain_flags = robj->flags;
1049 		drm_exec_fini(&exec);
1050 		if (copy_to_user(out, &info, sizeof(info)))
1051 			r = -EFAULT;
1052 		break;
1053 	}
1054 	case AMDGPU_GEM_OP_SET_PLACEMENT:
1055 		if (drm_gem_is_imported(&robj->tbo.base) &&
1056 		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
1057 			r = -EINVAL;
1058 			goto out_exec;
1059 		}
1060 		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
1061 			r = -EPERM;
1062 			goto out_exec;
1063 		}
1064 		for (base = robj->vm_bo; base; base = base->next)
1065 			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
1066 				amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
1067 				r = -EINVAL;
1068 				goto out_exec;
1069 			}
1070 
1071 
1072 		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
1073 							AMDGPU_GEM_DOMAIN_GTT |
1074 							AMDGPU_GEM_DOMAIN_CPU);
1075 		robj->allowed_domains = robj->preferred_domains;
1076 		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
1077 			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
1078 
1079 		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
1080 			amdgpu_vm_bo_invalidate(robj, true);
1081 		drm_exec_fini(&exec);
1082 		break;
1083 	case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
1084 		struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
1085 		struct drm_amdgpu_gem_vm_entry *vm_entries;
1086 		struct amdgpu_bo_va_mapping *mapping;
1087 		int num_mappings = 0;
1088 		/*
1089 		 * On input, num_entries is the size of the user-allocated array of
1090 		 * drm_amdgpu_gem_vm_entry stored at args->value.
1091 		 * On output, num_entries is the number of mappings the BO has.
1092 		 * If that number is larger than the size of the array, the ioctl
1093 		 * must be retried.
1094 		 */
1095 		vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
1096 		if (!vm_entries) {
1097 			r = -ENOMEM;
			goto out_exec;
		}
1098 
1099 		amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
1100 			if (num_mappings < args->num_entries) {
1101 				vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
1102 				vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
1103 				vm_entries[num_mappings].offset = mapping->offset;
1104 				vm_entries[num_mappings].flags = mapping->flags;
1105 			}
1106 			num_mappings += 1;
1107 		}
1108 
1109 		amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
1110 			if (num_mappings < args->num_entries) {
1111 				vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
1112 				vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
1113 				vm_entries[num_mappings].offset = mapping->offset;
1114 				vm_entries[num_mappings].flags = mapping->flags;
1115 			}
1116 			num_mappings += 1;
1117 		}
1118 
1119 		drm_exec_fini(&exec);
1120 
1121 		if (num_mappings > 0 && num_mappings <= args->num_entries)
1122 			if (copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries)))
1123 				r = -EFAULT;
1124 
1125 		args->num_entries = num_mappings;
1126 
1127 		kvfree(vm_entries);
1128 		break;
1129 	}
1130 	default:
1131 		drm_exec_fini(&exec);
1132 		r = -EINVAL;
1133 	}
1134 
1135 	drm_gem_object_put(gobj);
1136 	return r;
1137 out_exec:
1138 	drm_exec_fini(&exec);
1139 	drm_gem_object_put(gobj);
1140 	return r;
1141 }
1142 
1143 /**
1144  * amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
1145  *
1146  * @dev: drm device pointer
1147  * @data: drm_amdgpu_gem_list_handles
1148  * @filp: drm file pointer
1149  *
1150  * On input, num_entries is the size of the entries array.
1151  * On output, num_entries is the number of BOs in the process.
1152  * If that number is larger than the size of the array, the ioctl must
1153  * be retried.
1154  *
1155  * Returns:
1156  * 0 for success, -errno for errors.
1157  */
1158 int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
1159 				  struct drm_file *filp)
1160 {
1161 	struct drm_amdgpu_gem_list_handles *args = data;
1162 	struct drm_amdgpu_gem_list_handles_entry *bo_entries;
1163 	struct drm_gem_object *gobj;
1164 	int id, ret = 0;
1165 	int bo_index = 0;
1166 	int num_bos = 0;
1167 
1168 	spin_lock(&filp->table_lock);
1169 	idr_for_each_entry(&filp->object_idr, gobj, id)
1170 		num_bos += 1;
1171 	spin_unlock(&filp->table_lock);
1172 
1173 	if (args->num_entries < num_bos) {
1174 		args->num_entries = num_bos;
1175 		return 0;
1176 	}
1177 
1178 	if (num_bos == 0) {
1179 		args->num_entries = 0;
1180 		return 0;
1181 	}
1182 
1183 	bo_entries = kvzalloc_objs(*bo_entries, num_bos);
1184 	if (!bo_entries)
1185 		return -ENOMEM;
1186 
1187 	spin_lock(&filp->table_lock);
1188 	idr_for_each_entry(&filp->object_idr, gobj, id) {
1189 		struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
1190 		struct drm_amdgpu_gem_list_handles_entry *bo_entry;
1191 
1192 		if (bo_index >= num_bos) {
1193 			ret = -EAGAIN;
1194 			break;
1195 		}
1196 
1197 		bo_entry = &bo_entries[bo_index];
1198 
1199 		bo_entry->size = amdgpu_bo_size(bo);
1200 		bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK;
1201 		bo_entry->preferred_domains = bo->preferred_domains;
1202 		bo_entry->gem_handle = id;
1203 		bo_entry->alignment = bo->tbo.page_alignment;
1204 
1205 		if (bo->tbo.base.import_attach)
1206 			bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
1207 
1208 		bo_index += 1;
1209 	}
1210 	spin_unlock(&filp->table_lock);
1211 
1212 	args->num_entries = bo_index;
1213 
1214 	if (!ret)
1215 		if (copy_to_user(u64_to_user_ptr(args->entries), bo_entries, num_bos * sizeof(*bo_entries)))
1216 			ret = -EFAULT;
1217 
1218 	kvfree(bo_entries);
1219 
1220 	return ret;
1221 }
1222 
1223 static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
1224 				  int width,
1225 				  int cpp,
1226 				  bool tiled)
1227 {
1228 	int aligned = width;
1229 	int pitch_mask = 0;
1230 
1231 	switch (cpp) {
1232 	case 1:
1233 		pitch_mask = 255;
1234 		break;
1235 	case 2:
1236 		pitch_mask = 127;
1237 		break;
1238 	case 3:
1239 	case 4:
1240 		pitch_mask = 63;
1241 		break;
1242 	}
1243 
1244 	aligned += pitch_mask;
1245 	aligned &= ~pitch_mask;
1246 	return aligned * cpp;
1247 }
1248 
1249 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
1250 			    struct drm_device *dev,
1251 			    struct drm_mode_create_dumb *args)
1252 {
1253 	struct amdgpu_device *adev = drm_to_adev(dev);
1254 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
1255 	struct drm_gem_object *gobj;
1256 	uint32_t handle;
1257 	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1258 		    AMDGPU_GEM_CREATE_CPU_GTT_USWC |
1259 		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1260 	u32 domain;
1261 	int r;
1262 
1263 	/*
1264 	 * The buffer returned from this function should be cleared, but
1265 	 * it can only be done if the ring is enabled or we'll fail to
1266 	 * create the buffer.
1267 	 */
1268 	if (adev->mman.buffer_funcs_enabled)
1269 		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
1270 
1271 	args->pitch = amdgpu_gem_align_pitch(adev, args->width,
1272 					     DIV_ROUND_UP(args->bpp, 8), 0);
1273 	args->size = (u64)args->pitch * args->height;
1274 	args->size = ALIGN(args->size, PAGE_SIZE);
1275 	domain = amdgpu_bo_get_preferred_domain(adev,
1276 				amdgpu_display_supported_domains(adev, flags));
1277 	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
1278 				     ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
1279 	if (r)
1280 		return -ENOMEM;
1281 
1282 	r = drm_gem_handle_create(file_priv, gobj, &handle);
1283 	/* drop reference from allocate - handle holds it now */
1284 	drm_gem_object_put(gobj);
1285 	if (r)
1286 		return r;
1287 
1288 	args->handle = handle;
1289 	return 0;
1290 }
1291 
1292 #if defined(CONFIG_DEBUG_FS)
1293 static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
1294 {
1295 	struct amdgpu_device *adev = m->private;
1296 	struct drm_device *dev = adev_to_drm(adev);
1297 	struct drm_file *file;
1298 	int r;
1299 
1300 	r = mutex_lock_interruptible(&dev->filelist_mutex);
1301 	if (r)
1302 		return r;
1303 
1304 	list_for_each_entry(file, &dev->filelist, lhead) {
1305 		struct task_struct *task;
1306 		struct drm_gem_object *gobj;
1307 		struct pid *pid;
1308 		int id;
1309 
1310 		/*
1311 		 * Although we have a valid reference on file->pid, that does
1312 		 * not guarantee that the task_struct who called get_pid() is
1313 		 * still alive (e.g. get_pid(current) => fork() => exit()).
1314 		 * Therefore, we need to protect this ->comm access using RCU.
1315 		 */
1316 		rcu_read_lock();
1317 		pid = rcu_dereference(file->pid);
1318 		task = pid_task(pid, PIDTYPE_TGID);
1319 		seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
1320 			   task ? task->comm : "<unknown>");
1321 		rcu_read_unlock();
1322 
1323 		spin_lock(&file->table_lock);
1324 		idr_for_each_entry(&file->object_idr, gobj, id) {
1325 			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
1326 
1327 			amdgpu_bo_print_info(id, bo, m);
1328 		}
1329 		spin_unlock(&file->table_lock);
1330 	}
1331 
1332 	mutex_unlock(&dev->filelist_mutex);
1333 	return 0;
1334 }
1335 
1336 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);
1337 
1338 #endif
1339 
1340 void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
1341 {
1342 #if defined(CONFIG_DEBUG_FS)
1343 	struct drm_minor *minor = adev_to_drm(adev)->primary;
1344 	struct dentry *root = minor->debugfs_root;
1345 
1346 	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
1347 			    &amdgpu_debugfs_gem_info_fops);
1348 #endif
1349 }
1350