xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c (revision 727b77df826b44853476d6e8690fec4cf5515eca)
// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_auth.h>
#include <drm/drm_exec.h>
#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_userq.h"
#include "amdgpu_userq_fence.h"

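/*
 * Report which hardware IP types support user mode queues on this
 * device, as a bitmask of AMDGPU_HW_IP_* bits.
 */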
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
{
	int i;
	u32 userq_ip_mask = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
		if (adev->userq_funcs[i])
			userq_ip_mask |= (1 << i);
	}

	return userq_ip_mask;
}

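/*
 * Unmap a queue from the hardware if it is currently mapped. The queue
 * is marked hung on failure, unmapped on success.
 */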
static int
amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
			  struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	int r = 0;

	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
		r = userq_funcs->unmap(uq_mgr, queue);
		if (r)
			queue->state = AMDGPU_USERQ_STATE_HUNG;
		else
			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
	}
	return r;
}

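/*
 * Map a queue onto the hardware if it is currently unmapped. The queue
 * is marked hung on failure, mapped on success.
 */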
static int
amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
			struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	int r = 0;

	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
		r = userq_funcs->map(uq_mgr, queue);
		if (r)
			queue->state = AMDGPU_USERQ_STATE_HUNG;
		else
			queue->state = AMDGPU_USERQ_STATE_MAPPED;
	}
	return r;
}

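/*
 * Wait (interruptible, 100 ms timeout) for the last fence emitted on a
 * queue; log an error if it does not signal in time.
 */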
static void
amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
				 struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct dma_fence *f = queue->last_fence;
	int ret;

	if (f && !dma_fence_is_signaled(f)) {
		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
		if (ret <= 0)
			dev_err(adev->dev, "Timed out waiting for fence=%llu:%llu\n",
				f->context, f->seqno);
	}
}

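/*
 * Free everything attached to a queue: the MQD, the fence driver, the
 * IDR slot and the queue structure itself. Called with the manager's
 * userq_mutex held.
 */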
static void
amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
		     struct amdgpu_usermode_queue *queue,
		     int queue_id)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];

	uq_funcs->mqd_destroy(uq_mgr, queue);
	amdgpu_userq_fence_driver_free(queue);
	idr_remove(&uq_mgr->userq_idr, queue_id);
	kfree(queue);
}

int
amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	int queue_id;
	int ret = 0;

	mutex_lock(&uq_mgr->userq_mutex);
	/* Count the queues of this process that are currently mapped */
	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
		ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;

	mutex_unlock(&uq_mgr->userq_mutex);
	return ret;
}

#ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ
static struct amdgpu_usermode_queue *
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
{
	return idr_find(&uq_mgr->userq_idr, qid);
}

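/*
 * Guarantee a valid, unsignaled eviction fence before queue creation by
 * flushing (or scheduling and then flushing) the resume work. Returns
 * with uq_mgr->userq_mutex held.
 */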
void
amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
			     struct amdgpu_eviction_fence_mgr *evf_mgr)
{
	struct amdgpu_eviction_fence *ev_fence;

retry:
	/* Flush any pending resume work to create ev_fence */
	flush_delayed_work(&uq_mgr->resume_work);

	mutex_lock(&uq_mgr->userq_mutex);
	spin_lock(&evf_mgr->ev_fence_lock);
	ev_fence = evf_mgr->ev_fence;
	spin_unlock(&evf_mgr->ev_fence_lock);
	if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
		mutex_unlock(&uq_mgr->userq_mutex);
		/*
		 * Looks like there was no pending resume work,
		 * add one now to create a valid eviction fence
		 */
		schedule_delayed_work(&uq_mgr->resume_work, 0);
		goto retry;
	}
}

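/*
 * Back a user queue object with a kernel GTT BO: allocate it, assign a
 * GART address, kmap it for CPU access and zero the contents.
 */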
int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
			       struct amdgpu_userq_obj *userq_obj,
			       int size)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	bp.type = ttm_bo_type_kernel;
	bp.size = size;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
	if (r) {
		DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
		return r;
	}

	r = amdgpu_bo_reserve(userq_obj->obj, true);
	if (r) {
		DRM_ERROR("Failed to reserve BO to map (%d)", r);
		goto free_obj;
	}

	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
	if (r) {
		DRM_ERROR("Failed to alloc GART for userqueue object (%d)", r);
		goto unresv;
	}

	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
	if (r) {
		DRM_ERROR("Failed to map BO for userqueue (%d)", r);
		goto unresv;
	}

	userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
	amdgpu_bo_unreserve(userq_obj->obj);
	memset(userq_obj->cpu_ptr, 0, size);
	return 0;

unresv:
	amdgpu_bo_unreserve(userq_obj->obj);

free_obj:
	amdgpu_bo_unref(&userq_obj->obj);
	return r;
}

void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
				 struct amdgpu_userq_obj *userq_obj)
{
	amdgpu_bo_kunmap(userq_obj->obj);
	amdgpu_bo_unref(&userq_obj->obj);
}

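/*
 * Translate the queue's doorbell offset into an absolute doorbell index
 * on the BAR. The doorbell BO is pinned here and stays pinned until the
 * queue is destroyed; on error a negative errno is returned (cast to
 * uint64_t).
 */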
uint64_t
amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
				struct amdgpu_db_info *db_info,
				struct drm_file *filp)
{
	uint64_t index;
	struct drm_gem_object *gobj;
	struct amdgpu_userq_obj *db_obj = db_info->db_obj;
	int r, db_size;

	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
	if (gobj == NULL) {
		DRM_ERROR("Can't find GEM object for doorbell\n");
		return -EINVAL;
	}

	db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	drm_gem_object_put(gobj);

	/* Pin the BO before generating the index, unpin in queue destroy */
	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
	if (r) {
		DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
		goto unref_bo;
	}

	r = amdgpu_bo_reserve(db_obj->obj, true);
	if (r) {
		DRM_ERROR("[Usermode queues] Failed to reserve doorbell object\n");
		goto unpin_bo;
	}

	switch (db_info->queue_type) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_COMPUTE:
	case AMDGPU_HW_IP_DMA:
		db_size = sizeof(u64);
		break;

	case AMDGPU_HW_IP_VCN_ENC:
		db_size = sizeof(u32);
		db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
		break;

	case AMDGPU_HW_IP_VPE:
		db_size = sizeof(u32);
		db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
		break;

	default:
		DRM_ERROR("[Usermode queues] IP %d not supported\n", db_info->queue_type);
		r = -EINVAL;
		goto unpin_bo;
	}

	index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
					     db_info->doorbell_offset, db_size);
	DRM_DEBUG_DRIVER("[Usermode queues] doorbell index=%lld\n", index);
	amdgpu_bo_unreserve(db_obj->obj);
	return index;

unpin_bo:
	amdgpu_bo_unpin(db_obj->obj);

unref_bo:
	amdgpu_bo_unref(&db_obj->obj);
	return r;
}

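/*
 * Destroy a user queue: wait for its last fence, unmap it from the
 * hardware, release the doorbell BO and free all queue resources.
 */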
static int
amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_usermode_queue *queue;
	int r = 0;

	cancel_delayed_work(&uq_mgr->resume_work);
	mutex_lock(&uq_mgr->userq_mutex);

	queue = amdgpu_userq_find(uq_mgr, queue_id);
	if (!queue) {
		DRM_DEBUG_DRIVER("Invalid queue id to destroy\n");
		mutex_unlock(&uq_mgr->userq_mutex);
		return -EINVAL;
	}
	amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
	r = amdgpu_userq_unmap_helper(uq_mgr, queue);
	amdgpu_bo_unpin(queue->db_obj.obj);
	amdgpu_bo_unref(&queue->db_obj.obj);
	amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
	mutex_unlock(&uq_mgr->userq_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return r;
}

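/*
 * Queue priorities below HIGH are open to everyone; HIGH and above
 * require CAP_SYS_NICE or DRM master.
 */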
static int amdgpu_userq_priority_permit(struct drm_file *filp,
					int priority)
{
	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

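/*
 * Create a user queue: validate IP type, priority and secure flags, set
 * up the doorbell, fence driver and MQD, then map the queue unless
 * scheduling is currently halted for enforced isolation.
 */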
static int
amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *uq_funcs;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_db_info db_info;
	bool skip_map_queue;
	uint64_t index;
	int qid, r = 0;
	int priority =
		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;

	/* Usermode queues are only supported for GFX, Compute and SDMA IPs as of now */
	if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
	    args->in.ip_type != AMDGPU_HW_IP_DMA &&
	    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
		DRM_ERROR("Usermode queue doesn't support IP type %u\n", args->in.ip_type);
		return -EINVAL;
	}

	r = amdgpu_userq_priority_permit(filp, priority);
	if (r)
		return r;

	if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
	    (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
	    (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
	    !amdgpu_is_tmz(adev)) {
		drm_err(adev_to_drm(adev), "Secure only supported on GFX/Compute queues\n");
		return -EINVAL;
	}

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		dev_err(adev->dev, "pm_runtime_get_sync() failed for userqueue create\n");
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/*
	 * There could be a situation that we are creating a new queue while
	 * the other queues under this UQ_mgr are suspended. So if there is any
	 * resume work pending, wait for it to get done.
	 *
	 * This will also make sure we have a valid eviction fence ready to be used.
	 */
	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);

	uq_funcs = adev->userq_funcs[args->in.ip_type];
	if (!uq_funcs) {
		DRM_ERROR("Usermode queue is not supported for this IP (%u)\n", args->in.ip_type);
		r = -EINVAL;
		goto unlock;
	}

	queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		r = -ENOMEM;
		goto unlock;
	}
	queue->doorbell_handle = args->in.doorbell_handle;
	queue->queue_type = args->in.ip_type;
	queue->vm = &fpriv->vm;
	queue->priority = priority;

	db_info.queue_type = queue->queue_type;
	db_info.doorbell_handle = queue->doorbell_handle;
	db_info.db_obj = &queue->db_obj;
	db_info.doorbell_offset = args->in.doorbell_offset;

	/* Convert relative doorbell offset into absolute doorbell index */
	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
	if (index == (uint64_t)-EINVAL) {
		DRM_ERROR("Failed to get doorbell for queue\n");
		kfree(queue);
		goto unlock;
	}

	queue->doorbell_index = index;
	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
	r = amdgpu_userq_fence_driver_alloc(adev, queue);
	if (r) {
		DRM_ERROR("Failed to alloc fence driver\n");
		goto unlock;
	}

	r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
	if (r) {
		DRM_ERROR("Failed to create Queue\n");
		amdgpu_userq_fence_driver_free(queue);
		kfree(queue);
		goto unlock;
	}

	qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
	if (qid < 0) {
		DRM_ERROR("Failed to allocate a queue id\n");
		amdgpu_userq_fence_driver_free(queue);
		uq_funcs->mqd_destroy(uq_mgr, queue);
		kfree(queue);
		r = -ENOMEM;
		goto unlock;
	}

	/* don't map the queue if scheduling is halted */
	mutex_lock(&adev->userq_mutex);
	if (adev->userq_halt_for_enforce_isolation &&
	    ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
	     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
		skip_map_queue = true;
	else
		skip_map_queue = false;
	if (!skip_map_queue) {
		r = amdgpu_userq_map_helper(uq_mgr, queue);
		if (r) {
			mutex_unlock(&adev->userq_mutex);
			DRM_ERROR("Failed to map Queue\n");
			idr_remove(&uq_mgr->userq_idr, qid);
			amdgpu_userq_fence_driver_free(queue);
			uq_funcs->mqd_destroy(uq_mgr, queue);
			kfree(queue);
			goto unlock;
		}
	}
	mutex_unlock(&adev->userq_mutex);

	args->out.queue_id = qid;

unlock:
	mutex_unlock(&uq_mgr->userq_mutex);

	return r;
}

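/*
 * DRM ioctl entry point: dispatches AMDGPU_USERQ_OP_CREATE and
 * AMDGPU_USERQ_OP_FREE after rejecting any flags or fields that are
 * unused by the requested operation.
 */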
int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *filp)
{
	union drm_amdgpu_userq *args = data;
	int r;

	switch (args->in.op) {
	case AMDGPU_USERQ_OP_CREATE:
		if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
				       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
			return -EINVAL;
		r = amdgpu_userq_create(filp, args);
		if (r)
			DRM_ERROR("Failed to create usermode queue\n");
		break;

	case AMDGPU_USERQ_OP_FREE:
		if (args->in.ip_type ||
		    args->in.doorbell_handle ||
		    args->in.doorbell_offset ||
		    args->in.flags ||
		    args->in.queue_va ||
		    args->in.queue_size ||
		    args->in.rptr_va ||
		    args->in.wptr_va ||
		    args->in.mqd ||
		    args->in.mqd_size)
			return -EINVAL;
		r = amdgpu_userq_destroy(filp, args->in.queue_id);
		if (r)
			DRM_ERROR("Failed to destroy usermode queue\n");
		break;

	default:
		DRM_DEBUG_DRIVER("Invalid user queue op specified: %d\n", args->in.op);
		return -EINVAL;
	}

	return r;
}
#else
int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *filp)
{
	return -ENOTSUPP;
}
#endif

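/* Map all queues of a process again, returning the last error seen */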
static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_usermode_queue *queue;
	int queue_id;
	int ret = 0, r;

	/* Resume all the queues for this process */
	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
		r = amdgpu_userq_map_helper(uq_mgr, queue);
		if (r)
			ret = r;
	}

	if (ret)
		dev_err(adev->dev, "Failed to map all the queues\n");
	return ret;
}

static int
amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		DRM_ERROR("Failed to validate\n");

	return ret;
}

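/*
 * Re-lock and revalidate everything the process's VM depends on: the
 * page directory, the BOs on the done list, and any moved or
 * invalidated BOs, then install a fresh eviction fence.
 */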
static int
amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx *ticket;
	struct drm_exec exec;
	struct amdgpu_bo *bo;
	struct dma_resv *resv;
	bool clear, unlock;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		ret = amdgpu_vm_lock_pd(vm, &exec, 2);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret)) {
			DRM_ERROR("Failed to lock PD\n");
			goto unlock_all;
		}

		/* Lock the done list */
		list_for_each_entry(bo_va, &vm->done, base.vm_status) {
			bo = bo_va->base.bo;
			if (!bo)
				continue;

			ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(ret))
				goto unlock_all;
		}
	}

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->moved)) {
		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
					 base.vm_status);
		spin_unlock(&vm->status_lock);

		/* Per VM BOs never need to be cleared in the page tables */
		ret = amdgpu_vm_bo_update(adev, bo_va, false);
		if (ret)
			goto unlock_all;
		spin_lock(&vm->status_lock);
	}

	ticket = &exec.ticket;
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
					 base.vm_status);
		resv = bo_va->base.bo->tbo.base.resv;
		spin_unlock(&vm->status_lock);

		bo = bo_va->base.bo;
		ret = amdgpu_userq_validate_vm_bo(NULL, bo);
		if (ret) {
			DRM_ERROR("Failed to validate BO\n");
			goto unlock_all;
		}

		/* Try to reserve the BO to avoid clearing its ptes */
		if (!adev->debug_vm && dma_resv_trylock(resv)) {
			clear = false;
			unlock = true;
		/* The caller is already holding the reservation lock */
		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
			clear = false;
			unlock = false;
		/* Somebody else is using the BO right now */
		} else {
			clear = true;
			unlock = false;
		}

		ret = amdgpu_vm_bo_update(adev, bo_va, clear);

		if (unlock)
			dma_resv_unlock(resv);
		if (ret)
			goto unlock_all;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
	if (ret)
		DRM_ERROR("Failed to replace eviction fence\n");

unlock_all:
	drm_exec_fini(&exec);
	return ret;
}

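/*
 * Deferred resume work: once any pending eviction-fence suspend work
 * has finished, revalidate the process's BOs and remap its queues.
 */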
static void amdgpu_userq_restore_worker(struct work_struct *work)
{
	struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	int ret;

	flush_work(&fpriv->evf_mgr.suspend_work.work);

	mutex_lock(&uq_mgr->userq_mutex);

	ret = amdgpu_userq_validate_bos(uq_mgr);
	if (ret) {
		DRM_ERROR("Failed to validate BOs to restore\n");
		goto unlock;
	}

	ret = amdgpu_userq_restore_all(uq_mgr);
	if (ret) {
		DRM_ERROR("Failed to restore all queues\n");
		goto unlock;
	}

unlock:
	mutex_unlock(&uq_mgr->userq_mutex);
}

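/* Unmap all queues of a process, returning the last error seen */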
static int
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_usermode_queue *queue;
	int queue_id;
	int ret = 0, r;

	/* Try to unmap all the queues in this process ctx */
	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
		r = amdgpu_userq_unmap_helper(uq_mgr, queue);
		if (r)
			ret = r;
	}

	if (ret)
		dev_err(adev->dev, "Couldn't unmap all the queues\n");
	return ret;
}

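/*
 * Wait for the last fence of every queue in the manager, giving each
 * one 100 ms to signal; returns -ETIMEDOUT if any fence does not.
 */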
static int
amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	int queue_id, ret;

	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
		struct dma_fence *f = queue->last_fence;

		if (!f || dma_fence_is_signaled(f))
			continue;
		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
		if (ret <= 0) {
			DRM_ERROR("Timed out waiting for fence=%llu:%llu\n",
				  f->context, f->seqno);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

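/*
 * Evict a process's queues: wait for their fences, unmap them and
 * signal the eviction fence. Resume work is scheduled afterwards unless
 * the file descriptor is being closed.
 */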
void
amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
		   struct amdgpu_eviction_fence *ev_fence)
{
	int ret;
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;

	/* Wait for any pending userqueue fence work to finish */
	ret = amdgpu_userq_wait_for_signal(uq_mgr);
	if (ret) {
		DRM_ERROR("Not evicting userqueue, timeout waiting for work\n");
		return;
	}

	ret = amdgpu_userq_evict_all(uq_mgr);
	if (ret) {
		DRM_ERROR("Failed to evict userqueue\n");
		return;
	}

	/* Signal current eviction fence */
	amdgpu_eviction_fence_signal(evf_mgr, ev_fence);

	if (evf_mgr->fd_closing) {
		cancel_delayed_work(&uq_mgr->resume_work);
		return;
	}

	/* Schedule a resume work */
	schedule_delayed_work(&uq_mgr->resume_work, 0);
}

int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_device *adev)
{
	mutex_init(&userq_mgr->userq_mutex);
	idr_init_base(&userq_mgr->userq_idr, 1);
	userq_mgr->adev = adev;

	mutex_lock(&adev->userq_mutex);
	list_add(&userq_mgr->list, &adev->userq_mgr_list);
	mutex_unlock(&adev->userq_mutex);

	INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
	return 0;
}

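/*
 * Tear down a per-process queue manager: destroy all remaining queues,
 * unlink the manager from the device list and release its locks.
 */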
void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
{
	struct amdgpu_device *adev = userq_mgr->adev;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	uint32_t queue_id;

	cancel_delayed_work(&userq_mgr->resume_work);

	mutex_lock(&userq_mgr->userq_mutex);
	idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
		amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
		amdgpu_userq_unmap_helper(userq_mgr, queue);
		amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
	}
	mutex_lock(&adev->userq_mutex);
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		if (uqm == userq_mgr) {
			list_del(&uqm->list);
			break;
		}
	}
	mutex_unlock(&adev->userq_mutex);
	idr_destroy(&userq_mgr->userq_idr);
	mutex_unlock(&userq_mgr->userq_mutex);
	mutex_destroy(&userq_mgr->userq_mutex);
}

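/* Unmap every user queue on the device, e.g. for system suspend */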
int amdgpu_userq_suspend(struct amdgpu_device *adev)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	int queue_id;
	int ret = 0, r;

	if (!ip_mask)
		return 0;

	mutex_lock(&adev->userq_mutex);
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		cancel_delayed_work_sync(&uqm->resume_work);
		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
			r = amdgpu_userq_unmap_helper(uqm, queue);
			if (r)
				ret = r;
		}
	}
	mutex_unlock(&adev->userq_mutex);
	return ret;
}

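/* Remap every user queue on the device, e.g. on resume from suspend */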
int amdgpu_userq_resume(struct amdgpu_device *adev)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	int queue_id;
	int ret = 0, r;

	if (!ip_mask)
		return 0;

	mutex_lock(&adev->userq_mutex);
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
			r = amdgpu_userq_map_helper(uqm, queue);
			if (r)
				ret = r;
		}
	}
	mutex_unlock(&adev->userq_mutex);
	return ret;
}

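/*
 * Halt user queue scheduling for enforced isolation: unmap all
 * GFX/compute queues on the given XCP index and block new mappings.
 */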
int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
						  u32 idx)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	int queue_id;
	int ret = 0, r;

	/* only need to stop gfx/compute */
	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
		return 0;

	mutex_lock(&adev->userq_mutex);
	if (adev->userq_halt_for_enforce_isolation)
		dev_warn(adev->dev, "userq scheduling already stopped!\n");
	adev->userq_halt_for_enforce_isolation = true;
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		cancel_delayed_work_sync(&uqm->resume_work);
		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
			if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
			     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
			    (queue->xcp_id == idx)) {
				r = amdgpu_userq_unmap_helper(uqm, queue);
				if (r)
					ret = r;
			}
		}
	}
	mutex_unlock(&adev->userq_mutex);
	return ret;
}

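/*
 * Restart user queue scheduling after enforced isolation: allow
 * mappings again and remap all GFX/compute queues on the given XCP
 * index.
 */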
int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
						   u32 idx)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	int queue_id;
	int ret = 0, r;

	/* only need to start gfx/compute */
	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
		return 0;

	mutex_lock(&adev->userq_mutex);
	if (!adev->userq_halt_for_enforce_isolation)
		dev_warn(adev->dev, "userq scheduling already started!\n");
	adev->userq_halt_for_enforce_isolation = false;
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
			if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
			     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
			    (queue->xcp_id == idx)) {
				r = amdgpu_userq_map_helper(uqm, queue);
				if (r)
					ret = r;
			}
		}
	}
	mutex_unlock(&adev->userq_mutex);
	return ret;
}
917