/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_tt.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_vm.h"

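/**
 * amdgpu_gem_fault - CPU page fault handler for GEM object mappings
 * @vmf: fault information
 *
 * Reserves the BO, lets TTM fill in the faulting range (unless the
 * device has been unplugged, in which case a dummy page is mapped
 * instead) and returns the resulting VM fault code.
 */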
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		ret = amdgpu_bo_fault_reserve_notify(bo);
		if (ret) {
			drm_dev_exit(idx);
			goto unlock;
		}

		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);

		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct amdgpu_gem_vm_ops = {
	.fault = amdgpu_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

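/**
 * amdgpu_gem_object_free - free callback for amdgpu GEM objects
 * @gobj: GEM object being destroyed
 *
 * Unregisters the HMM mirror (for userptr BOs) and drops the TTM
 * reference backing the GEM object.
 */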
static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);

	amdgpu_hmm_unregister(aobj);
	ttm_bo_put(&aobj->tbo);
}

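/**
 * amdgpu_gem_object_create - create an amdgpu BO wrapped in a GEM object
 * @adev: amdgpu device object
 * @size: buffer size in bytes
 * @alignment: requested byte alignment
 * @initial_domain: preferred initial memory domain(s)
 * @flags: AMDGPU_GEM_CREATE_* allocation flags
 * @type: TTM buffer object type
 * @resv: optional reservation object to share, NULL to allocate a new one
 * @obj: resulting GEM object, NULL on failure
 * @xcp_id_plus1: XCP partition id + 1, 0 for no partition preference
 *
 * Returns 0 on success, negative error code on failure.
 */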
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj, int8_t xcp_id_plus1)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;
	flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
	bp.flags = flags;
	bp.domain = initial_domain;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
	bp.xcp_id_plus1 = xcp_id_plus1;

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r)
		return r;

	bo = &ubo->bo;
	*obj = &bo->tbo.base;

	return 0;
}

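/**
 * amdgpu_gem_force_release - drop all GEM handles of all clients
 * @adev: amdgpu device object
 *
 * Emergency teardown helper: walks every open DRM file and releases all
 * of its GEM allocations. Any user space clients still alive at this
 * point are a bug, hence the WARN_ONCE.
 */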
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the create
 * and the open ioctl paths.
 */
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
				  struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    !amdgpu_vm_is_bo_always_valid(vm, abo))
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	amdgpu_vm_bo_update_shared(abo);
	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va)
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	else
		++bo_va->ref_count;
	amdgpu_bo_unreserve(abo);

	/* Validate and add eviction fence to DMABuf imports with dynamic
	 * attachment in compute VMs. Re-validation will be done by
	 * amdgpu_vm_validate. Fences are on the reservation shared with the
	 * export, which is currently required to be validated and fenced
	 * already by amdgpu_amdkfd_gpuvm_restore_process_bos.
	 *
	 * Nested locking below for the case that a GEM object is opened in
	 * kfd_mem_export_dmabuf. Since the lock below is only taken for imports,
	 * but not for export, this is a different lock class that cannot lead to
	 * circular lock dependencies.
	 */
	if (!vm->is_compute_context || !vm->process_info)
		return 0;
	if (!obj->import_attach ||
	    !dma_buf_is_dynamic(obj->import_attach->dmabuf))
		return 0;
	mutex_lock_nested(&vm->process_info->lock, 1);
	if (!WARN_ON(!vm->process_info->eviction_fence)) {
		r = amdgpu_amdkfd_bo_validate_and_fence(abo, AMDGPU_GEM_DOMAIN_GTT,
							&vm->process_info->eviction_fence->base);
		if (r) {
			struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);

			dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
			if (ti) {
				dev_warn(adev->dev, "pid %d\n", ti->pid);
				amdgpu_vm_put_task_info(ti);
			}
		}
	}
	mutex_unlock(&vm->process_info->lock);

	return r;
}

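/**
 * amdgpu_gem_object_close - clean up a GEM object's VM mappings on handle close
 * @obj: GEM object being closed
 * @file_priv: DRM file of the closing client
 *
 * Drops the per-VM bo_va reference and, once the last reference is
 * gone, removes the mappings from the page tables and fences the
 * buffer against the page table update.
 */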
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct dma_fence *fence = NULL;
	struct amdgpu_bo_va *bo_va;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va || --bo_va->ref_count)
		goto out_unlock;

	amdgpu_vm_bo_del(adev, bo_va);
	amdgpu_vm_bo_update_shared(bo);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (unlikely(r < 0))
		dev_err(adev->dev,
			"failed to clear page tables on GEM object close (%ld)\n", r);
	if (r || !fence)
		goto out_unlock;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (r)
		dev_err(adev->dev, "leaking bo va (%ld)\n", r);
	drm_exec_fini(&exec);
}

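/**
 * amdgpu_gem_object_mmap - mmap callback for amdgpu GEM objects
 * @obj: GEM object to map
 * @vma: the resulting VMA
 *
 * Rejects CPU mappings of userptr BOs and of BOs created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS, then hands off to the TTM mmap
 * helper.
 */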
static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
	 * becoming writable and makes is_cow_mapping(vm_flags) false.
	 */
	if (is_cow_mapping(vma->vm_flags) &&
	    !(vma->vm_flags & VM_ACCESS_FLAGS))
		vm_flags_clear(vma, VM_MAYWRITE);

	return drm_gem_ttm_mmap(obj, vma);
}

const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
	.free = amdgpu_gem_object_free,
	.open = amdgpu_gem_object_open,
	.close = amdgpu_gem_object_close,
	.export = amdgpu_gem_prime_export,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = amdgpu_gem_object_mmap,
	.vm_ops = &amdgpu_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
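/*
 * Illustrative user space sketch (not part of this file): allocating a
 * 1 MiB GTT buffer through the create ioctl below, using the uapi
 * layout from include/uapi/drm/amdgpu_drm.h; use_handle() stands in
 * for a hypothetical consumer of the returned handle.
 *
 *	union drm_amdgpu_gem_create args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.bo_size = 1 << 20;
 *	args.in.alignment = 4096;
 *	args.in.domains = AMDGPU_GEM_DOMAIN_GTT;
 *	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args) == 0)
 *		use_handle(args.out.handle);
 */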
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle, initial_domain;
	int r;

	/* reject DOORBELLs until userspace code to use it is available */
	if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
		return -EINVAL;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
		      AMDGPU_GEM_CREATE_ENCRYPTED |
		      AMDGPU_GEM_CREATE_GFX12_DCC |
		      AMDGPU_GEM_CREATE_DISCARDABLE))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
		return -EINVAL;
	}

	/* always clear VRAM */
	flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.bo, false);
		if (r)
			return r;

		resv = vm->root.bo->tbo.base.resv;
	}

	initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     initial_domain,
				     flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1);
	if (r && r != -ERESTARTSYS) {
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			goto retry;
		}

		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
			goto retry;
		}
		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
				size, initial_domain, args->in.alignment, r);
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.bo);
		}
		amdgpu_bo_unreserve(vm->root.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

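/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 * @dev: DRM device
 * @data: ioctl arguments (struct drm_amdgpu_gem_userptr)
 * @filp: DRM file of the calling client
 *
 * Wraps a page-aligned range of user memory in a GTT-only BO,
 * registers an MMU notifier for it and optionally validates the pages
 * up front.
 */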
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_userptr *args = data;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct drm_gem_object *gobj;
	struct hmm_range *range;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
	if (r)
		goto release_object;

	r = amdgpu_hmm_register(bo, args->addr);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
						 &range);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);

release_object:
	drm_gem_object_put(gobj);

	return r;
}

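/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a buffer
 * @filp: DRM file of the calling client
 * @dev: DRM device
 * @handle: GEM handle of the buffer
 * @offset_p: returned mmap offset
 *
 * Rejects userptr BOs and BOs created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS; returns 0 on success.
 */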
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
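 *
 * Example (illustrative): for a deadline 500 ms in the future, i.e.
 * timeout_ns == ktime_get_ns() + 500 * NSEC_PER_MSEC, this returns
 * roughly msecs_to_jiffies(500).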
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

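/**
 * amdgpu_gem_wait_idle_ioctl - wait for a GEM object to become idle
 * @dev: DRM device
 * @data: ioctl arguments (union drm_amdgpu_gem_wait_idle)
 * @filp: DRM file of the calling client
 *
 * Waits on all fences in the BO's reservation object up to the given
 * timeout and reports back whether the buffer is still busy.
 */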
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put(gobj);
	return r;
}

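/**
 * amdgpu_gem_metadata_ioctl - get or set buffer metadata and tiling flags
 * @dev: DRM device
 * @data: ioctl arguments (struct drm_amdgpu_gem_metadata)
 * @filp: DRM file of the calling client
 *
 * GET returns the tiling flags and the opaque metadata blob attached
 * to the BO; SET validates the blob size and stores both.
 */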
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the hardware PTE flags corresponding to the given GEM UAPI
 * flags on this ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
	if (flags & AMDGPU_VM_PAGE_NOALLOC)
		pte_flag |= AMDGPU_PTE_NOALLOC;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}

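/**
 * amdgpu_gem_va_ioctl - map/unmap a BO in the client's GPU address space
 * @dev: DRM device
 * @data: ioctl arguments (struct drm_amdgpu_gem_va)
 * @filp: DRM file of the calling client
 *
 * Validates the requested VA range against the reserved areas and the
 * GMC hole, locks the BO and the page directory, then performs the
 * requested map, unmap, clear or replace operation.
 */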
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
		AMDGPU_VM_PAGE_NOALLOC;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct drm_exec exec;
	uint64_t va_flags;
	uint64_t vm_size;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in reserved area 0x%llx\n",
			args->va_address, AMDGPU_VA_RESERVED_BOTTOM);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
	vm_size -= AMDGPU_VA_RESERVED_TOP;
	if (args->va_address + args->map_size > vm_size) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in top reserved area 0x%llx\n",
			args->va_address + args->map_size, vm_size);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(dev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		if (gobj) {
			r = drm_exec_lock_obj(&exec, gobj);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(r))
				goto error;
		}

		r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error:
	drm_exec_fini(&exec);
	drm_gem_object_put(gobj);
	return r;
}

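/**
 * amdgpu_gem_op_ioctl - query or modify GEM buffer object attributes
 * @dev: DRM device
 * @data: ioctl arguments (struct drm_amdgpu_gem_op)
 * @filp: DRM file of the calling client
 *
 * Implements AMDGPU_GEM_OP_GET_GEM_CREATE_INFO to read back the
 * creation parameters of a BO and AMDGPU_GEM_OP_SET_PLACEMENT to
 * change its preferred domains.
 */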
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->tbo.base.import_attach &&
		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put(gobj);
	return r;
}

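/**
 * amdgpu_gem_align_pitch - align a scanout pitch to hardware requirements
 * @adev: amdgpu device object
 * @width: width of the surface in pixels
 * @cpp: bytes per pixel
 * @tiled: unused, linear surfaces only
 *
 * Returns the pitch in bytes, with the width rounded up to a
 * cpp-dependent pixel boundary (256/128/64 pixels for 1/2/4-byte
 * formats respectively).
 */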
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
				  int width,
				  int cpp,
				  bool tiled)
{
	int aligned = width;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

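/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 * @file_priv: DRM file of the calling client
 * @dev: DRM device
 * @args: dumb buffer parameters; pitch, size and handle are filled in
 *
 * Allocates a CPU-accessible, contiguous buffer in a scanout-capable
 * domain and returns a GEM handle for it.
 */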
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_gem_align_pitch(adev, args->width,
					     DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;
		struct drm_gem_object *gobj;
		struct pid *pid;
		int id;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		pid = rcu_dereference(file->pid);
		task = pid_task(pid, PIDTYPE_TGID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, id) {
			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

			amdgpu_bo_print_info(id, bo, m);
		}
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);

#endif

void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
			    &amdgpu_debugfs_gem_info_fops);
#endif
}