xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c (revision 1bcd679209420305a86833bc357d50021909edaf)
// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_auth.h>
#include <drm/drm_exec.h>
#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_userq.h"
#include "amdgpu_userq_fence.h"

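/*
 * Build a bitmask with one bit per AMDGPU_HW_IP_* type for which user queue
 * functions are registered on this device.
 */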
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
{
	int i;
	u32 userq_ip_mask = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
		if (adev->userq_funcs[i])
			userq_ip_mask |= (1 << i);
	}

	return userq_ip_mask;
}

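/*
 * Unmap a user queue from the hardware if it is currently mapped.  On
 * success the queue moves to AMDGPU_USERQ_STATE_UNMAPPED; if the IP
 * specific unmap callback fails the queue is marked
 * AMDGPU_USERQ_STATE_HUNG.  amdgpu_userq_map_helper() below is the
 * inverse operation.
 */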
static int
amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
			  struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	int r = 0;

	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
		r = userq_funcs->unmap(uq_mgr, queue);
		if (r)
			queue->state = AMDGPU_USERQ_STATE_HUNG;
		else
			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
	}
	return r;
}

static int
amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
			struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	int r = 0;

	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
		r = userq_funcs->map(uq_mgr, queue);
		if (r)
			queue->state = AMDGPU_USERQ_STATE_HUNG;
		else
			queue->state = AMDGPU_USERQ_STATE_MAPPED;
	}
	return r;
}

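/*
 * Wait up to 100 ms for the last fence emitted on this queue to signal
 * before the queue is torn down or unmapped; log an error if it does not.
 */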
static void
amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
				 struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct dma_fence *f = queue->last_fence;
	int ret;

	if (f && !dma_fence_is_signaled(f)) {
		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
		if (ret <= 0)
			dev_err(adev->dev, "Timed out waiting for fence=%llu:%llu\n",
				f->context, f->seqno);
	}
}

static void
amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
		     struct amdgpu_usermode_queue *queue,
		     int queue_id)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];

	uq_funcs->mqd_destroy(uq_mgr, queue);
	amdgpu_userq_fence_driver_free(queue);
	idr_remove(&uq_mgr->userq_idr, queue_id);
	kfree(queue);
}

int
amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	int queue_id;
	int ret = 0;

	mutex_lock(&uq_mgr->userq_mutex);
	/* Count the queues of this process that are currently mapped */
	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
		ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;

	mutex_unlock(&uq_mgr->userq_mutex);
	return ret;
}

static struct amdgpu_usermode_queue *
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
{
	return idr_find(&uq_mgr->userq_idr, qid);
}

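/*
 * Make sure a valid eviction fence exists before a queue is created or
 * restored: flush any pending resume work, then check the current eviction
 * fence; if it is missing or already signaled, kick the resume work and
 * retry.  Returns with uq_mgr->userq_mutex held.
 */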
void
amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
			     struct amdgpu_eviction_fence_mgr *evf_mgr)
{
	struct amdgpu_eviction_fence *ev_fence;

retry:
	/* Flush any pending resume work to create ev_fence */
	flush_delayed_work(&uq_mgr->resume_work);

	mutex_lock(&uq_mgr->userq_mutex);
	spin_lock(&evf_mgr->ev_fence_lock);
	ev_fence = evf_mgr->ev_fence;
	spin_unlock(&evf_mgr->ev_fence_lock);
	if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
		mutex_unlock(&uq_mgr->userq_mutex);
		/*
		 * Looks like there was no pending resume work,
		 * add one now to create a valid eviction fence
		 */
		schedule_delayed_work(&uq_mgr->resume_work, 0);
		goto retry;
	}
}

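/*
 * Allocate a kernel GTT buffer object of @size bytes for user queue
 * metadata, bind it to GART, map it for CPU access and zero it.  The GPU
 * address and CPU pointer are returned in @userq_obj.
 */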
int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
			       struct amdgpu_userq_obj *userq_obj,
			       int size)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	bp.type = ttm_bo_type_kernel;
	bp.size = size;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
	if (r) {
		DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
		return r;
	}

	r = amdgpu_bo_reserve(userq_obj->obj, true);
	if (r) {
		DRM_ERROR("Failed to reserve BO to map (%d)", r);
		goto free_obj;
	}

	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
	if (r) {
		DRM_ERROR("Failed to alloc GART for userqueue object (%d)", r);
		goto unresv;
	}

	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
	if (r) {
		DRM_ERROR("Failed to map BO for userqueue (%d)", r);
		goto unresv;
	}

	userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
	amdgpu_bo_unreserve(userq_obj->obj);
	memset(userq_obj->cpu_ptr, 0, size);
	return 0;

unresv:
	amdgpu_bo_unreserve(userq_obj->obj);

free_obj:
	amdgpu_bo_unref(&userq_obj->obj);
	return r;
}

void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
				 struct amdgpu_userq_obj *userq_obj)
{
	amdgpu_bo_kunmap(userq_obj->obj);
	amdgpu_bo_unref(&userq_obj->obj);
}

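/*
 * Translate the doorbell handle and offset supplied by userspace into an
 * absolute doorbell index on the doorbell BAR.  The doorbell BO is pinned
 * here and stays pinned until the queue is destroyed.  Returns the index,
 * or a negative error code on failure.
 */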
uint64_t
amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
				struct amdgpu_db_info *db_info,
				struct drm_file *filp)
{
	uint64_t index;
	struct drm_gem_object *gobj;
	struct amdgpu_userq_obj *db_obj = db_info->db_obj;
	int r, db_size;

	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
	if (gobj == NULL) {
		DRM_ERROR("Can't find GEM object for doorbell\n");
		return -EINVAL;
	}

	db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	drm_gem_object_put(gobj);

	/* Pin the BO before generating the index, unpin in queue destroy */
	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
	if (r) {
		DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
		goto unref_bo;
	}

	r = amdgpu_bo_reserve(db_obj->obj, true);
	if (r) {
		DRM_ERROR("[Usermode queues] Failed to reserve doorbell object\n");
		goto unpin_bo;
	}

	switch (db_info->queue_type) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_COMPUTE:
	case AMDGPU_HW_IP_DMA:
		db_size = sizeof(u64);
		break;

	case AMDGPU_HW_IP_VCN_ENC:
		db_size = sizeof(u32);
		db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
		break;

	case AMDGPU_HW_IP_VPE:
		db_size = sizeof(u32);
		db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
		break;

	default:
		DRM_ERROR("[Usermode queues] IP %d not supported\n", db_info->queue_type);
		r = -EINVAL;
		goto unpin_bo;
	}

	index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
					     db_info->doorbell_offset, db_size);
	DRM_DEBUG_DRIVER("[Usermode queues] doorbell index=%lld\n", index);
	amdgpu_bo_unreserve(db_obj->obj);
	return index;

unpin_bo:
	amdgpu_bo_unpin(db_obj->obj);

unref_bo:
	amdgpu_bo_unref(&db_obj->obj);
	return r;
}

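/*
 * Destroy the user queue identified by @queue_id for this file: wait for
 * its last fence, unmap it from the hardware, release the doorbell BO,
 * free the MQD, fence driver and queue structure, and drop the runtime PM
 * reference taken at create time.
 */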
static int
amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_usermode_queue *queue;
	int r = 0;

	cancel_delayed_work(&uq_mgr->resume_work);
	mutex_lock(&uq_mgr->userq_mutex);

	queue = amdgpu_userq_find(uq_mgr, queue_id);
	if (!queue) {
		DRM_DEBUG_DRIVER("Invalid queue id to destroy\n");
		mutex_unlock(&uq_mgr->userq_mutex);
		return -EINVAL;
	}
	amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
	r = amdgpu_userq_unmap_helper(uq_mgr, queue);
	amdgpu_bo_unpin(queue->db_obj.obj);
	amdgpu_bo_unref(&queue->db_obj.obj);
	amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
	mutex_unlock(&uq_mgr->userq_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return r;
}

static int amdgpu_userq_priority_permit(struct drm_file *filp,
					int priority)
{
	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

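/*
 * Create a new user mode queue from the ioctl arguments: validate the IP
 * type and priority, take a runtime PM reference, make sure an eviction
 * fence exists, allocate the queue, resolve its doorbell index, create the
 * MQD and map the queue onto the hardware (unless user queue scheduling is
 * currently halted for enforced isolation).
 */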
static int
amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *uq_funcs;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_db_info db_info;
	bool skip_map_queue;
	uint64_t index;
	int qid, r = 0;
	int priority =
		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;

	/* Usermode queues are only supported for GFX, SDMA and Compute IPs as of now */
	if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
	    args->in.ip_type != AMDGPU_HW_IP_DMA &&
	    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
		DRM_ERROR("Usermode queue doesn't support IP type %u\n", args->in.ip_type);
		return -EINVAL;
	}

	r = amdgpu_userq_priority_permit(filp, priority);
	if (r)
		return r;

	if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
	    (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
	    (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
	    !amdgpu_is_tmz(adev)) {
		drm_err(adev_to_drm(adev), "Secure only supported on GFX/Compute queues\n");
		return -EINVAL;
	}

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		dev_err(adev->dev, "pm_runtime_get_sync() failed for userqueue create\n");
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/*
	 * There could be a situation that we are creating a new queue while
	 * the other queues under this UQ_mgr are suspended. So if there is any
	 * resume work pending, wait for it to get done.
	 *
	 * This will also make sure we have a valid eviction fence ready to be used.
	 */
	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);

	uq_funcs = adev->userq_funcs[args->in.ip_type];
	if (!uq_funcs) {
		DRM_ERROR("Usermode queue is not supported for this IP (%u)\n", args->in.ip_type);
		r = -EINVAL;
		goto unlock;
	}

	queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		r = -ENOMEM;
		goto unlock;
	}
	queue->doorbell_handle = args->in.doorbell_handle;
	queue->queue_type = args->in.ip_type;
	queue->vm = &fpriv->vm;
	queue->priority = priority;

	db_info.queue_type = queue->queue_type;
	db_info.doorbell_handle = queue->doorbell_handle;
	db_info.db_obj = &queue->db_obj;
	db_info.doorbell_offset = args->in.doorbell_offset;

	/* Convert relative doorbell offset into absolute doorbell index */
	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
	if (index == (uint64_t)-EINVAL) {
		DRM_ERROR("Failed to get doorbell for queue\n");
		kfree(queue);
		goto unlock;
	}

	queue->doorbell_index = index;
	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
	r = amdgpu_userq_fence_driver_alloc(adev, queue);
	if (r) {
		DRM_ERROR("Failed to alloc fence driver\n");
		goto unlock;
	}

	r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
	if (r) {
		DRM_ERROR("Failed to create Queue\n");
		amdgpu_userq_fence_driver_free(queue);
		kfree(queue);
		goto unlock;
	}

	qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
	if (qid < 0) {
		DRM_ERROR("Failed to allocate a queue id\n");
		amdgpu_userq_fence_driver_free(queue);
		uq_funcs->mqd_destroy(uq_mgr, queue);
		kfree(queue);
		r = -ENOMEM;
		goto unlock;
	}

	/* don't map the queue if scheduling is halted */
	mutex_lock(&adev->userq_mutex);
	if (adev->userq_halt_for_enforce_isolation &&
	    ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
	     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
		skip_map_queue = true;
	else
		skip_map_queue = false;
	if (!skip_map_queue) {
		r = amdgpu_userq_map_helper(uq_mgr, queue);
		if (r) {
			mutex_unlock(&adev->userq_mutex);
			DRM_ERROR("Failed to map Queue\n");
			idr_remove(&uq_mgr->userq_idr, qid);
			amdgpu_userq_fence_driver_free(queue);
			uq_funcs->mqd_destroy(uq_mgr, queue);
			kfree(queue);
			goto unlock;
		}
	}
	mutex_unlock(&adev->userq_mutex);

	args->out.queue_id = qid;

unlock:
	mutex_unlock(&uq_mgr->userq_mutex);

	return r;
}

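/*
 * Handler for the user queue ioctl (union drm_amdgpu_userq).  Two
 * operations are supported: AMDGPU_USERQ_OP_CREATE, which consumes the
 * input fields and returns the new queue id in args->out.queue_id, and
 * AMDGPU_USERQ_OP_FREE, which accepts only the queue id and rejects any
 * other non-zero input field.
 *
 * A minimal sketch of an OP_FREE request as userspace might fill it in
 * (field names taken from the checks below; the exact uAPI ioctl wrapper
 * is defined elsewhere and not shown here):
 *
 *	union drm_amdgpu_userq req = {0};
 *	req.in.op = AMDGPU_USERQ_OP_FREE;
 *	req.in.queue_id = queue_id;	// every other field must stay zero
 */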
int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *filp)
{
	union drm_amdgpu_userq *args = data;
	int r;

	switch (args->in.op) {
	case AMDGPU_USERQ_OP_CREATE:
		if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
				       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
			return -EINVAL;
		r = amdgpu_userq_create(filp, args);
		if (r)
			DRM_ERROR("Failed to create usermode queue\n");
		break;

	case AMDGPU_USERQ_OP_FREE:
		if (args->in.ip_type ||
		    args->in.doorbell_handle ||
		    args->in.doorbell_offset ||
		    args->in.flags ||
		    args->in.queue_va ||
		    args->in.queue_size ||
		    args->in.rptr_va ||
		    args->in.wptr_va ||
		    args->in.mqd ||
		    args->in.mqd_size)
			return -EINVAL;
		r = amdgpu_userq_destroy(filp, args->in.queue_id);
		if (r)
			DRM_ERROR("Failed to destroy usermode queue\n");
		break;

	default:
		DRM_DEBUG_DRIVER("Invalid user queue op specified: %d\n", args->in.op);
		return -EINVAL;
	}

	return r;
}

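/*
 * Map all queues of this process back onto the hardware, typically after
 * their buffers have been validated again.  Returns the last error seen,
 * if any.
 */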
static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_usermode_queue *queue;
	int queue_id;
	int ret = 0, r;

	/* Resume all the queues for this process */
	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
		r = amdgpu_userq_map_helper(uq_mgr, queue);
		if (r)
			ret = r;
	}

	if (ret)
		dev_err(adev->dev, "Failed to map all the queues\n");
	return ret;
}

static int
amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
553 		DRM_ERROR("Fail to validate\n");

	return ret;
}

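/*
 * Lock the VM page directory and every BO on the VM's done list, then
 * revalidate and update the page table entries of all moved and
 * invalidated BOs before attaching a fresh eviction fence.  Called from
 * the restore worker with uq_mgr->userq_mutex held.
 */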
static int
amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx *ticket;
	struct drm_exec exec;
	struct amdgpu_bo *bo;
	struct dma_resv *resv;
	bool clear, unlock;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		ret = amdgpu_vm_lock_pd(vm, &exec, 2);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret)) {
			DRM_ERROR("Failed to lock PD\n");
			goto unlock_all;
		}

		/* Lock the done list */
		list_for_each_entry(bo_va, &vm->done, base.vm_status) {
			bo = bo_va->base.bo;
			if (!bo)
				continue;

			ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(ret))
				goto unlock_all;
		}
	}

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->moved)) {
		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
					 base.vm_status);
		spin_unlock(&vm->status_lock);

		/* Per VM BOs never need to be cleared in the page tables */
		ret = amdgpu_vm_bo_update(adev, bo_va, false);
		if (ret)
			goto unlock_all;
		spin_lock(&vm->status_lock);
	}

	ticket = &exec.ticket;
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
					 base.vm_status);
		resv = bo_va->base.bo->tbo.base.resv;
		spin_unlock(&vm->status_lock);

		bo = bo_va->base.bo;
		ret = amdgpu_userq_validate_vm_bo(NULL, bo);
		if (ret) {
			DRM_ERROR("Failed to validate BO\n");
			goto unlock_all;
		}

		/* Try to reserve the BO to avoid clearing its ptes */
		if (!adev->debug_vm && dma_resv_trylock(resv)) {
			clear = false;
			unlock = true;
		/* The caller is already holding the reservation lock */
		} else if (dma_resv_locking_ctx(resv) == ticket) {
			clear = false;
			unlock = false;
		/* Somebody else is using the BO right now */
		} else {
			clear = true;
			unlock = false;
		}

		ret = amdgpu_vm_bo_update(adev, bo_va, clear);

		if (unlock)
			dma_resv_unlock(resv);
		if (ret)
			goto unlock_all;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
	if (ret)
		DRM_ERROR("Failed to replace eviction fence\n");

unlock_all:
	drm_exec_fini(&exec);
	return ret;
}

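/*
 * Delayed work that restores the user queues of one process after an
 * eviction: wait for any pending eviction fence suspend work, revalidate
 * the process BOs and map all queues again.
 */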
static void amdgpu_userq_restore_worker(struct work_struct *work)
{
	struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	int ret;

	flush_work(&fpriv->evf_mgr.suspend_work.work);

	mutex_lock(&uq_mgr->userq_mutex);

	ret = amdgpu_userq_validate_bos(uq_mgr);
	if (ret) {
		DRM_ERROR("Failed to validate BOs to restore\n");
		goto unlock;
	}

	ret = amdgpu_userq_restore_all(uq_mgr);
	if (ret) {
		DRM_ERROR("Failed to restore all queues\n");
		goto unlock;
	}

unlock:
	mutex_unlock(&uq_mgr->userq_mutex);
}

static int
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_usermode_queue *queue;
	int queue_id;
	int ret = 0, r;

	/* Try to unmap all the queues in this process ctx */
	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
		r = amdgpu_userq_unmap_helper(uq_mgr, queue);
		if (r)
			ret = r;
	}

	if (ret)
		dev_err(adev->dev, "Couldn't unmap all the queues\n");
	return ret;
}

static int
amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	int queue_id, ret;

	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
		struct dma_fence *f = queue->last_fence;

		if (!f || dma_fence_is_signaled(f))
			continue;
		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
		if (ret <= 0) {
			DRM_ERROR("Timed out waiting for fence=%llu:%llu\n",
				  f->context, f->seqno);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

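/*
 * Evict all user queues of a process: wait for outstanding fences, unmap
 * every queue and signal the current eviction fence.  Unless the file is
 * being closed, resume work is scheduled so the queues get mapped again
 * once the eviction is over.
 */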
void
amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
		   struct amdgpu_eviction_fence *ev_fence)
{
	int ret;
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;

	/* Wait for any pending userqueue fence work to finish */
	ret = amdgpu_userq_wait_for_signal(uq_mgr);
	if (ret) {
		DRM_ERROR("Not evicting userqueue, timeout waiting for work\n");
		return;
	}

	ret = amdgpu_userq_evict_all(uq_mgr);
	if (ret) {
		DRM_ERROR("Failed to evict userqueue\n");
		return;
	}

	/* Signal current eviction fence */
	amdgpu_eviction_fence_signal(evf_mgr, ev_fence);

	if (evf_mgr->fd_closing) {
		cancel_delayed_work(&uq_mgr->resume_work);
		return;
	}

	/* Schedule a resume work */
	schedule_delayed_work(&uq_mgr->resume_work, 0);
}

int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_device *adev)
{
	mutex_init(&userq_mgr->userq_mutex);
	idr_init_base(&userq_mgr->userq_idr, 1);
	userq_mgr->adev = adev;

	mutex_lock(&adev->userq_mutex);
	list_add(&userq_mgr->list, &adev->userq_mgr_list);
	mutex_unlock(&adev->userq_mutex);

	INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
	return 0;
}

void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
{
	struct amdgpu_device *adev = userq_mgr->adev;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	uint32_t queue_id;

	cancel_delayed_work(&userq_mgr->resume_work);

	mutex_lock(&userq_mgr->userq_mutex);
	idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
		amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
		amdgpu_userq_unmap_helper(userq_mgr, queue);
		amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
	}
	mutex_lock(&adev->userq_mutex);
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		if (uqm == userq_mgr) {
			list_del(&uqm->list);
			break;
		}
	}
	mutex_unlock(&adev->userq_mutex);
	idr_destroy(&userq_mgr->userq_idr);
	mutex_unlock(&userq_mgr->userq_mutex);
	mutex_destroy(&userq_mgr->userq_mutex);
}

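/*
 * Unmap every user queue of every process on this device, e.g. across
 * device suspend.  amdgpu_userq_resume() below is the counterpart that
 * maps them again.  Both return the last error seen, if any.
 */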
int amdgpu_userq_suspend(struct amdgpu_device *adev)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	int queue_id;
	int ret = 0, r;

	if (!ip_mask)
		return 0;

	mutex_lock(&adev->userq_mutex);
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		cancel_delayed_work_sync(&uqm->resume_work);
		mutex_lock(&uqm->userq_mutex);
		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
			r = amdgpu_userq_unmap_helper(uqm, queue);
			if (r)
				ret = r;
		}
		mutex_unlock(&uqm->userq_mutex);
	}
	mutex_unlock(&adev->userq_mutex);
	return ret;
}

int amdgpu_userq_resume(struct amdgpu_device *adev)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	int queue_id;
	int ret = 0, r;

	if (!ip_mask)
		return 0;

	mutex_lock(&adev->userq_mutex);
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		mutex_lock(&uqm->userq_mutex);
		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
			r = amdgpu_userq_map_helper(uqm, queue);
			if (r)
				ret = r;
		}
		mutex_unlock(&uqm->userq_mutex);
	}
	mutex_unlock(&adev->userq_mutex);
	return ret;
}

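/*
 * Halt user queue scheduling for enforced isolation: unmap all GFX/compute
 * user queues whose xcp_id matches @idx and set
 * userq_halt_for_enforce_isolation so newly created queues are not mapped
 * until amdgpu_userq_start_sched_for_enforce_isolation() is called.
 */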
int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
						  u32 idx)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	int queue_id;
	int ret = 0, r;

	/* only need to stop gfx/compute */
	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
		return 0;

	mutex_lock(&adev->userq_mutex);
	if (adev->userq_halt_for_enforce_isolation)
		dev_warn(adev->dev, "userq scheduling already stopped!\n");
	adev->userq_halt_for_enforce_isolation = true;
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		cancel_delayed_work_sync(&uqm->resume_work);
		mutex_lock(&uqm->userq_mutex);
		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
			if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
			     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
			    (queue->xcp_id == idx)) {
				r = amdgpu_userq_unmap_helper(uqm, queue);
				if (r)
					ret = r;
			}
		}
		mutex_unlock(&uqm->userq_mutex);
	}
	mutex_unlock(&adev->userq_mutex);
	return ret;
}

int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
						   u32 idx)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	int queue_id;
	int ret = 0, r;

	/* only need to start gfx/compute */
	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
		return 0;

	mutex_lock(&adev->userq_mutex);
	if (!adev->userq_halt_for_enforce_isolation)
		dev_warn(adev->dev, "userq scheduling already started!\n");
	adev->userq_halt_for_enforce_isolation = false;
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		mutex_lock(&uqm->userq_mutex);
		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
			if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
			     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
			    (queue->xcp_id == idx)) {
				r = amdgpu_userq_map_helper(uqm, queue);
				if (r)
					ret = r;
			}
		}
		mutex_unlock(&uqm->userq_mutex);
	}
	mutex_unlock(&adev->userq_mutex);
	return ret;
}