xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c (revision a018d1819f158991b7308e4f74609c6c029b670c)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <drm/drm_auth.h>
26 #include <drm/drm_exec.h>
27 #include <linux/pm_runtime.h>
28 #include <drm/drm_drv.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_reset.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_userq.h"
34 #include "amdgpu_hmm.h"
35 #include "amdgpu_userq_fence.h"
36 
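/*
 * Build a bitmask of hardware IP types (AMDGPU_HW_IP_*) that support
 * user mode queues, i.e. every IP with registered userq functions.
 */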
37 u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
38 {
39 	int i;
40 	u32 userq_ip_mask = 0;
41 
42 	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
43 		if (adev->userq_funcs[i])
44 			userq_ip_mask |= (1 << i);
45 	}
46 
47 	return userq_ip_mask;
48 }
49 
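/*
 * Check whether the given reset type is supported for queues of the
 * given ring type on this device.
 */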
50 static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev,
51 				enum amdgpu_ring_type ring_type, int reset_type)
52 {
53 
54 	if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX)
55 		return false;
56 
57 	switch (ring_type) {
58 	case AMDGPU_RING_TYPE_GFX:
59 		if (adev->gfx.gfx_supported_reset & reset_type)
60 			return true;
61 		break;
62 	case AMDGPU_RING_TYPE_COMPUTE:
63 		if (adev->gfx.compute_supported_reset & reset_type)
64 			return true;
65 		break;
66 	case AMDGPU_RING_TYPE_SDMA:
67 		if (adev->sdma.supported_reset & reset_type)
68 			return true;
69 		break;
70 	case AMDGPU_RING_TYPE_VCN_DEC:
71 	case AMDGPU_RING_TYPE_VCN_ENC:
72 		if (adev->vcn.supported_reset & reset_type)
73 			return true;
74 		break;
75 	case AMDGPU_RING_TYPE_VCN_JPEG:
76 		if (adev->jpeg.supported_reset & reset_type)
77 			return true;
78 		break;
79 	default:
80 		break;
81 	}
82 	return false;
83 }
84 
85 static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev)
86 {
87 	if (amdgpu_device_should_recover_gpu(adev)) {
88 		amdgpu_reset_domain_schedule(adev->reset_domain,
89 					     &adev->userq_reset_work);
90 		/* Wait for the reset job to complete */
91 		flush_work(&adev->userq_reset_work);
92 	}
93 }
94 
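/*
 * Walk the userq-capable engines and let each back-end detect hung user
 * queues and reset them per queue; escalate to a full GPU reset if a
 * per-queue reset fails.
 */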
95 static int
96 amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr)
97 {
98 	struct amdgpu_device *adev = uq_mgr->adev;
99 	const int queue_types[] = {
100 		AMDGPU_RING_TYPE_COMPUTE,
101 		AMDGPU_RING_TYPE_GFX,
102 		AMDGPU_RING_TYPE_SDMA
103 	};
104 	const int num_queue_types = ARRAY_SIZE(queue_types);
105 	bool gpu_reset = false;
106 	int r = 0;
107 	int i;
108 
109 	/* Warn if the caller does not hold the userq mutex */
110 	WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex));
111 
112 	if (unlikely(adev->debug_disable_gpu_ring_reset)) {
113 		dev_err(adev->dev, "userq reset disabled by debug mask\n");
114 		return 0;
115 	}
116 
117 	/*
118 	 * If GPU recovery feature is disabled system-wide,
119 	 * skip all reset detection logic
120 	 */
121 	if (!amdgpu_gpu_recovery)
122 		return 0;
123 
124 	/*
125 	 * Iterate through all queue types to detect and reset problematic queues
126 	 * Process each queue type in the defined order
127 	 */
128 	for (i = 0; i < num_queue_types; i++) {
129 		int ring_type = queue_types[i];
130 		const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type];
131 
132 		if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE))
133 			continue;
134 
135 		if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 &&
136 		    funcs && funcs->detect_and_reset) {
137 			r = funcs->detect_and_reset(adev, ring_type);
138 			if (r) {
139 				gpu_reset = true;
140 				break;
141 			}
142 		}
143 	}
144 
145 	if (gpu_reset)
146 		amdgpu_userq_gpu_reset(adev);
147 
148 	return r;
149 }
150 
151 static void amdgpu_userq_hang_detect_work(struct work_struct *work)
152 {
153 	struct amdgpu_usermode_queue *queue = container_of(work,
154 							  struct amdgpu_usermode_queue,
155 							  hang_detect_work.work);
156 	struct dma_fence *fence;
157 	struct amdgpu_userq_mgr *uq_mgr;
158 
159 	if (!queue || !queue->userq_mgr)
160 		return;
161 
162 	uq_mgr = queue->userq_mgr;
163 	fence = READ_ONCE(queue->hang_detect_fence);
164 	/* Fence already signaled – no action needed */
165 	if (!fence || dma_fence_is_signaled(fence))
166 		return;
167 
168 	mutex_lock(&uq_mgr->userq_mutex);
169 	amdgpu_userq_detect_and_reset_queues(uq_mgr);
170 	mutex_unlock(&uq_mgr->userq_mutex);
171 }
172 
173 /*
174  * Start hang detection for a user queue fence. A delayed work will be scheduled
175  * to check if the fence is still pending after the timeout period.
176  */
177 void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue)
178 {
179 	struct amdgpu_device *adev;
180 	unsigned long timeout_ms;
181 
182 	if (!queue || !queue->userq_mgr || !queue->userq_mgr->adev)
183 		return;
184 
185 	adev = queue->userq_mgr->adev;
186 	/* Determine timeout based on queue type */
187 	switch (queue->queue_type) {
188 	case AMDGPU_RING_TYPE_GFX:
189 		timeout_ms = adev->gfx_timeout;
190 		break;
191 	case AMDGPU_RING_TYPE_COMPUTE:
192 		timeout_ms = adev->compute_timeout;
193 		break;
194 	case AMDGPU_RING_TYPE_SDMA:
195 		timeout_ms = adev->sdma_timeout;
196 		break;
197 	default:
198 		timeout_ms = adev->gfx_timeout;
199 		break;
200 	}
201 
202 	/* Store the fence to monitor and schedule hang detection */
203 	WRITE_ONCE(queue->hang_detect_fence, queue->last_fence);
204 	schedule_delayed_work(&queue->hang_detect_work,
205 		     msecs_to_jiffies(timeout_ms));
206 }
207 
208 static void amdgpu_userq_init_hang_detect_work(struct amdgpu_usermode_queue *queue)
209 {
210 	INIT_DELAYED_WORK(&queue->hang_detect_work, amdgpu_userq_hang_detect_work);
211 	queue->hang_detect_fence = NULL;
212 }
213 
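/*
 * Track a validated queue VA on the queue's VA list and mark the
 * backing bo_va as mapped for user queue use.
 */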
214 static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
215 					   struct amdgpu_bo_va_mapping *va_map, u64 addr)
216 {
217 	struct amdgpu_userq_va_cursor *va_cursor;
219 
220 	va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
221 	if (!va_cursor)
222 		return -ENOMEM;
223 
224 	INIT_LIST_HEAD(&va_cursor->list);
225 	va_cursor->gpu_addr = addr;
226 	atomic_set(&va_map->bo_va->userq_va_mapped, 1);
227 	list_add(&va_cursor->list, &queue->userq_va_list);
228 
229 	return 0;
230 }
231 
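/*
 * Validate that a user supplied queue VA is backed by a VM mapping
 * large enough for the expected size, and track it on the queue.
 */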
232 int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
233 				   struct amdgpu_usermode_queue *queue,
234 				   u64 addr, u64 expected_size)
235 {
236 	struct amdgpu_bo_va_mapping *va_map;
237 	struct amdgpu_vm *vm = queue->vm;
238 	u64 user_addr;
239 	u64 size;
240 	int r = 0;
241 
242 	user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
243 	size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
244 
245 	r = amdgpu_bo_reserve(vm->root.bo, false);
246 	if (r)
247 		return r;
248 
249 	va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
250 	if (!va_map) {
251 		r = -EINVAL;
252 		goto out_err;
253 	}
254 	/* Only validate that the userq buffer resides within the VM mapping range */
255 	if (user_addr >= va_map->start &&
256 	    va_map->last - user_addr + 1 >= size) {
257 		amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
258 		amdgpu_bo_unreserve(vm->root.bo);
259 		return 0;
260 	}
261 
262 	r = -EINVAL;
263 out_err:
264 	amdgpu_bo_unreserve(vm->root.bo);
265 	return r;
266 }
267 
268 static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
269 {
270 	struct amdgpu_bo_va_mapping *mapping;
271 	bool r;
272 
273 	if (amdgpu_bo_reserve(vm->root.bo, false))
274 		return false;
275 
276 	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
277 	if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
278 		r = true;
279 	else
280 		r = false;
281 	amdgpu_bo_unreserve(vm->root.bo);
282 
283 	return r;
284 }
285 
286 static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
287 {
288 	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
289 	int r = 0;
290 
291 	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
292 		r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
293 		dev_dbg(queue->userq_mgr->adev->dev,
294 			"validate the userq mapping:%p va:%llx r:%d\n",
295 			queue, va_cursor->gpu_addr, r);
296 	}
297 
298 	if (r != 0)
299 		return true;
300 
301 	return false;
302 }
303 
304 static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
305 					    struct amdgpu_userq_va_cursor *va_cursor)
306 {
307 	atomic_set(&mapping->bo_va->userq_va_mapped, 0);
308 	list_del(&va_cursor->list);
309 	kfree(va_cursor);
310 }
311 
312 static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
313 						struct amdgpu_usermode_queue *queue)
314 {
315 	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
316 	struct amdgpu_bo_va_mapping *mapping;
317 	int r;
318 
319 	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
320 	if (r)
321 		return r;
322 
323 	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
324 		mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
325 		if (!mapping) {
326 			r = -EINVAL;
327 			goto err;
328 		}
329 		dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
330 			queue, va_cursor->gpu_addr);
331 		amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
332 	}
333 err:
334 	amdgpu_bo_unreserve(queue->vm->root.bo);
335 	return r;
336 }
337 
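/*
 * Per-queue state transition helpers: preempt/restore move a queue
 * between MAPPED and PREEMPTED, unmap/map move it between MAPPED and
 * UNMAPPED. A failed transition leaves the queue marked HUNG.
 */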
338 static int amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue *queue)
339 {
340 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
341 	struct amdgpu_device *adev = uq_mgr->adev;
342 	const struct amdgpu_userq_funcs *userq_funcs =
343 		adev->userq_funcs[queue->queue_type];
344 	bool found_hung_queue = false;
345 	int r = 0;
346 
347 	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
348 		r = userq_funcs->preempt(queue);
349 		if (r) {
350 			queue->state = AMDGPU_USERQ_STATE_HUNG;
351 			found_hung_queue = true;
352 		} else {
353 			queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
354 		}
355 	}
356 
357 	if (found_hung_queue)
358 		amdgpu_userq_detect_and_reset_queues(uq_mgr);
359 
360 	return r;
361 }
362 
363 static int amdgpu_userq_restore_helper(struct amdgpu_usermode_queue *queue)
364 {
365 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
366 	struct amdgpu_device *adev = uq_mgr->adev;
367 	const struct amdgpu_userq_funcs *userq_funcs =
368 		adev->userq_funcs[queue->queue_type];
369 	int r = 0;
370 
371 	if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
372 		r = userq_funcs->restore(queue);
373 		if (r) {
374 			queue->state = AMDGPU_USERQ_STATE_HUNG;
375 		} else {
376 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
377 		}
378 	}
379 
380 	return r;
381 }
382 
383 static int amdgpu_userq_unmap_helper(struct amdgpu_usermode_queue *queue)
384 {
385 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
386 	struct amdgpu_device *adev = uq_mgr->adev;
387 	const struct amdgpu_userq_funcs *userq_funcs =
388 		adev->userq_funcs[queue->queue_type];
389 	bool found_hung_queue = false;
390 	int r = 0;
391 
392 	if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
393 		(queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
394 		r = userq_funcs->unmap(queue);
395 		if (r) {
396 			queue->state = AMDGPU_USERQ_STATE_HUNG;
397 			found_hung_queue = true;
398 		} else {
399 			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
400 		}
401 	}
402 
403 	if (found_hung_queue)
404 		amdgpu_userq_detect_and_reset_queues(uq_mgr);
405 
406 	return r;
407 }
408 
409 static int amdgpu_userq_map_helper(struct amdgpu_usermode_queue *queue)
410 {
411 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
412 	struct amdgpu_device *adev = uq_mgr->adev;
413 	const struct amdgpu_userq_funcs *userq_funcs =
414 		adev->userq_funcs[queue->queue_type];
415 	int r = 0;
416 
417 	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
418 		r = userq_funcs->map(queue);
419 		if (r) {
420 			queue->state = AMDGPU_USERQ_STATE_HUNG;
421 			amdgpu_userq_detect_and_reset_queues(uq_mgr);
422 		} else {
423 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
424 		}
425 	}
426 
427 	return r;
428 }
429 
430 static int amdgpu_userq_wait_for_last_fence(struct amdgpu_usermode_queue *queue)
431 {
432 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
433 	struct dma_fence *f = queue->last_fence;
434 	int ret = 0;
435 
436 	if (f && !dma_fence_is_signaled(f)) {
437 		ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT);
438 		if (ret <= 0) {
439 			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
440 				     f->context, f->seqno);
441 			queue->state = AMDGPU_USERQ_STATE_HUNG;
442 			return -ETIME;
443 		}
444 	}
445 
446 	return ret;
447 }
448 
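/*
 * Final teardown of a user queue: release its VA tracking, MQD and
 * fence driver, drop its doorbell entry and free the queue itself.
 * Serialized against mode-1 reset via the reset domain semaphore.
 */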
449 static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue)
450 {
451 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
452 	struct amdgpu_device *adev = uq_mgr->adev;
453 	const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
454 
455 	/* Wait for mode-1 reset to complete */
456 	down_read(&adev->reset_domain->sem);
457 
458 	/* Release per-queue resources */
459 	amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
460 	uq_funcs->mqd_destroy(queue);
461 	amdgpu_userq_fence_driver_free(queue);
462 	/* Use interrupt-safe locking since IRQ handlers may access these XArrays */
463 	xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
464 	queue->userq_mgr = NULL;
465 	list_del(&queue->userq_va_list);
466 	kfree(queue);
467 
468 	up_read(&adev->reset_domain->sem);
469 }
470 
471 void
472 amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
473 			     struct amdgpu_eviction_fence_mgr *evf_mgr)
474 {
475 	struct amdgpu_eviction_fence *ev_fence;
476 
477 retry:
478 	/* Flush any pending resume work to create ev_fence */
479 	flush_delayed_work(&uq_mgr->resume_work);
480 
481 	mutex_lock(&uq_mgr->userq_mutex);
482 	spin_lock(&evf_mgr->ev_fence_lock);
483 	ev_fence = evf_mgr->ev_fence;
484 	spin_unlock(&evf_mgr->ev_fence_lock);
485 	if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
486 		mutex_unlock(&uq_mgr->userq_mutex);
487 		/*
488 		 * Looks like there was no pending resume work,
489 		 * add one now to create a valid eviction fence
490 		 */
491 		schedule_delayed_work(&uq_mgr->resume_work, 0);
492 		goto retry;
493 	}
494 }
495 
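/*
 * Allocate a kernel owned GTT object for queue metadata, bind it to
 * GART, map it for CPU access and record its GPU address.
 */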
496 int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
497 			       struct amdgpu_userq_obj *userq_obj,
498 			       int size)
499 {
500 	struct amdgpu_device *adev = uq_mgr->adev;
501 	struct amdgpu_bo_param bp;
502 	int r;
503 
504 	memset(&bp, 0, sizeof(bp));
505 	bp.byte_align = PAGE_SIZE;
506 	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
507 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
508 		   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
509 	bp.type = ttm_bo_type_kernel;
510 	bp.size = size;
511 	bp.resv = NULL;
512 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
513 
514 	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
515 	if (r) {
516 		drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
517 		return r;
518 	}
519 
520 	r = amdgpu_bo_reserve(userq_obj->obj, true);
521 	if (r) {
522 		drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
523 		goto free_obj;
524 	}
525 
526 	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
527 	if (r) {
528 		drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
529 		goto unresv;
530 	}
531 
532 	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
533 	if (r) {
534 		drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
535 		goto unresv;
536 	}
537 
538 	userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
539 	amdgpu_bo_unreserve(userq_obj->obj);
540 	memset(userq_obj->cpu_ptr, 0, size);
541 	return 0;
542 
543 unresv:
544 	amdgpu_bo_unreserve(userq_obj->obj);
545 
546 free_obj:
547 	amdgpu_bo_unref(&userq_obj->obj);
548 	return r;
549 }
550 
551 void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
552 				 struct amdgpu_userq_obj *userq_obj)
553 {
554 	amdgpu_bo_kunmap(userq_obj->obj);
555 	amdgpu_bo_unref(&userq_obj->obj);
556 }
557 
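/*
 * Resolve the doorbell GEM handle supplied by userspace, pin the
 * doorbell BO and convert the queue's doorbell offset into an absolute
 * doorbell index on the BAR.
 */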
558 uint64_t
559 amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
560 				struct amdgpu_db_info *db_info,
561 				struct drm_file *filp)
562 {
563 	uint64_t index;
564 	struct drm_gem_object *gobj;
565 	struct amdgpu_userq_obj *db_obj = db_info->db_obj;
566 	int r, db_size;
567 
568 	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
569 	if (gobj == NULL) {
570 		drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
571 		return -EINVAL;
572 	}
573 
574 	db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
575 	drm_gem_object_put(gobj);
576 
577 	r = amdgpu_bo_reserve(db_obj->obj, true);
578 	if (r) {
579 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
580 		goto unref_bo;
581 	}
582 
583 	/* Pin the BO before generating the index, unpin in queue destroy */
584 	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
585 	if (r) {
586 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
587 		goto unresv_bo;
588 	}
589 
590 	switch (db_info->queue_type) {
591 	case AMDGPU_HW_IP_GFX:
592 	case AMDGPU_HW_IP_COMPUTE:
593 	case AMDGPU_HW_IP_DMA:
594 		db_size = sizeof(u64);
595 		break;
596 	default:
597 		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
598 			     db_info->queue_type);
599 		r = -EINVAL;
600 		goto unpin_bo;
601 	}
602 
603 	/* Validate doorbell_offset is within the doorbell BO */
604 	if ((u64)db_info->doorbell_offset * db_size + db_size >
605 	    amdgpu_bo_size(db_obj->obj)) {
606 		r = -EINVAL;
607 		goto unpin_bo;
608 	}
609 
610 	index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
611 					     db_info->doorbell_offset, db_size);
612 	drm_dbg_driver(adev_to_drm(uq_mgr->adev),
613 		       "[Usermode queues] doorbell index=%lld\n", index);
614 	amdgpu_bo_unreserve(db_obj->obj);
615 	return index;
616 
617 unpin_bo:
618 	amdgpu_bo_unpin(db_obj->obj);
619 unresv_bo:
620 	amdgpu_bo_unreserve(db_obj->obj);
621 unref_bo:
622 	amdgpu_bo_unref(&db_obj->obj);
623 	return r;
624 }
625 
626 static int
627 amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue)
628 {
629 	struct amdgpu_device *adev = uq_mgr->adev;
630 	int r = 0;
631 
632 	cancel_delayed_work_sync(&uq_mgr->resume_work);
633 	mutex_lock(&uq_mgr->userq_mutex);
634 	amdgpu_userq_wait_for_last_fence(queue);
635 	/* Cancel any pending hang detection work and cleanup */
636 	if (queue->hang_detect_fence) {
637 		cancel_delayed_work_sync(&queue->hang_detect_work);
638 		queue->hang_detect_fence = NULL;
639 	}
640 	r = amdgpu_bo_reserve(queue->db_obj.obj, true);
641 	if (!r) {
642 		amdgpu_bo_unpin(queue->db_obj.obj);
643 		amdgpu_bo_unreserve(queue->db_obj.obj);
644 	}
645 	amdgpu_bo_unref(&queue->db_obj.obj);
646 
647 	r = amdgpu_bo_reserve(queue->wptr_obj.obj, true);
648 	if (!r) {
649 		amdgpu_bo_unpin(queue->wptr_obj.obj);
650 		amdgpu_bo_unreserve(queue->wptr_obj.obj);
651 	}
652 	amdgpu_bo_unref(&queue->wptr_obj.obj);
653 
654 	atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
655 #if defined(CONFIG_DEBUG_FS)
656 	debugfs_remove_recursive(queue->debugfs_queue);
657 #endif
658 	amdgpu_userq_detect_and_reset_queues(uq_mgr);
659 	r = amdgpu_userq_unmap_helper(queue);
660 	/* TODO: a GPU reset is required if the userq hw unmap fails */
661 	if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
662 		drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a HW mapping userq\n");
663 		queue->state = AMDGPU_USERQ_STATE_HUNG;
664 	}
665 	amdgpu_userq_cleanup(queue);
666 	mutex_unlock(&uq_mgr->userq_mutex);
667 
668 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
669 
670 	return r;
671 }
672 
673 static void amdgpu_userq_kref_destroy(struct kref *kref)
674 {
675 	int r;
676 	struct amdgpu_usermode_queue *queue =
677 		container_of(kref, struct amdgpu_usermode_queue, refcount);
678 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
679 
680 	r = amdgpu_userq_destroy(uq_mgr, queue);
681 	if (r)
682 		drm_file_err(uq_mgr->file, "Failed to destroy usermode queue %d\n", r);
683 }
684 
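/*
 * Queue reference helpers: amdgpu_userq_get() looks up a queue by id and
 * takes a reference, amdgpu_userq_put() drops one and destroys the queue
 * once the last reference is gone.
 */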
685 struct amdgpu_usermode_queue *amdgpu_userq_get(struct amdgpu_userq_mgr *uq_mgr, u32 qid)
686 {
687 	struct amdgpu_usermode_queue *queue;
688 
689 	xa_lock(&uq_mgr->userq_xa);
690 	queue = xa_load(&uq_mgr->userq_xa, qid);
691 	if (queue)
692 		kref_get(&queue->refcount);
693 	xa_unlock(&uq_mgr->userq_xa);
694 
695 	return queue;
696 }
697 
698 void amdgpu_userq_put(struct amdgpu_usermode_queue *queue)
699 {
700 	if (queue)
701 		kref_put(&queue->refcount, amdgpu_userq_kref_destroy);
702 }
703 
704 static int amdgpu_userq_priority_permit(struct drm_file *filp,
705 					int priority)
706 {
707 	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
708 		return 0;
709 
710 	if (capable(CAP_SYS_NICE))
711 		return 0;
712 
713 	if (drm_is_current_master(filp))
714 		return 0;
715 
716 	return -EACCES;
717 }
718 
719 #if defined(CONFIG_DEBUG_FS)
720 static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
721 {
722 	struct amdgpu_usermode_queue *queue = m->private;
723 	struct amdgpu_bo *bo;
724 	int r;
725 
726 	if (!queue || !queue->mqd.obj)
727 		return -EINVAL;
728 
729 	bo = amdgpu_bo_ref(queue->mqd.obj);
730 	r = amdgpu_bo_reserve(bo, true);
731 	if (r) {
732 		amdgpu_bo_unref(&bo);
733 		return -EINVAL;
734 	}
735 
736 	seq_printf(m, "queue_type: %d\n", queue->queue_type);
737 	seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
738 
739 	amdgpu_bo_unreserve(bo);
740 	amdgpu_bo_unref(&bo);
741 
742 	return 0;
743 }
744 
745 static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
746 {
747 	return single_open(file, amdgpu_mqd_info_read, inode->i_private);
748 }
749 
750 static const struct file_operations amdgpu_mqd_info_fops = {
751 	.owner = THIS_MODULE,
752 	.open = amdgpu_mqd_info_open,
753 	.read = seq_read,
754 	.llseek = seq_lseek,
755 	.release = single_release,
756 };
757 #endif
758 
759 static int
760 amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
761 {
762 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
763 	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
764 	struct amdgpu_device *adev = uq_mgr->adev;
765 	const struct amdgpu_userq_funcs *uq_funcs;
766 	struct amdgpu_usermode_queue *queue;
767 	struct amdgpu_db_info db_info;
768 	char *queue_name;
769 	bool skip_map_queue;
770 	u32 qid;
771 	uint64_t index;
772 	int r = 0;
773 	int priority =
774 		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
775 		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
776 
777 	r = amdgpu_userq_priority_permit(filp, priority);
778 	if (r)
779 		return r;
780 
781 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
782 	if (r < 0) {
783 		drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
784 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
785 		return r;
786 	}
787 
788 	/*
789 	 * There could be a situation that we are creating a new queue while
790 	 * the other queues under this UQ_mgr are suspended. So if there is any
791 	 * resume work pending, wait for it to get done.
792 	 *
793 	 * This will also make sure we have a valid eviction fence ready to be used.
794 	 */
795 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
796 
797 	uq_funcs = adev->userq_funcs[args->in.ip_type];
798 	if (!uq_funcs) {
799 		drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
800 			     args->in.ip_type);
801 		r = -EINVAL;
802 		goto unlock;
803 	}
804 
805 	queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
806 	if (!queue) {
807 		drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
808 		r = -ENOMEM;
809 		goto unlock;
810 	}
811 
812 	INIT_LIST_HEAD(&queue->userq_va_list);
813 	queue->doorbell_handle = args->in.doorbell_handle;
814 	queue->queue_type = args->in.ip_type;
815 	queue->vm = &fpriv->vm;
816 	queue->priority = priority;
817 
818 	db_info.queue_type = queue->queue_type;
819 	db_info.doorbell_handle = queue->doorbell_handle;
820 	db_info.db_obj = &queue->db_obj;
821 	db_info.doorbell_offset = args->in.doorbell_offset;
822 
823 	queue->userq_mgr = uq_mgr;
824 	/* Validate the userq virtual addresses. */
825 	if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) ||
826 	    amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
827 	    amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
828 		r = -EINVAL;
829 		kfree(queue);
830 		goto unlock;
831 	}
832 
833 	/* Convert relative doorbell offset into absolute doorbell index */
834 	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
835 	if (index == (uint64_t)-EINVAL) {
836 		drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
837 		kfree(queue);
838 		r = -EINVAL;
839 		goto unlock;
840 	}
841 
842 	queue->doorbell_index = index;
843 	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
844 	r = amdgpu_userq_fence_driver_alloc(adev, queue);
845 	if (r) {
846 		drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
847 		goto unlock;
848 	}
849 
850 	r = uq_funcs->mqd_create(queue, &args->in);
851 	if (r) {
852 		drm_file_err(uq_mgr->file, "Failed to create Queue\n");
853 		amdgpu_userq_fence_driver_free(queue);
854 		kfree(queue);
855 		goto unlock;
856 	}
857 
858 	/* drop this refcount during queue destroy */
859 	kref_init(&queue->refcount);
860 
861 	/* Wait for mode-1 reset to complete */
862 	down_read(&adev->reset_domain->sem);
863 	r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
864 	if (r) {
865 		kfree(queue);
866 		up_read(&adev->reset_domain->sem);
867 		goto unlock;
868 	}
869 
870 	r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
871 		     XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
872 	if (r) {
873 		drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
874 		amdgpu_userq_fence_driver_free(queue);
875 		uq_funcs->mqd_destroy(queue);
876 		kfree(queue);
877 		r = -ENOMEM;
878 		up_read(&adev->reset_domain->sem);
879 		goto unlock;
880 	}
881 	up_read(&adev->reset_domain->sem);
882 
883 	/* don't map the queue if scheduling is halted */
884 	if (adev->userq_halt_for_enforce_isolation &&
885 	    ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
886 	     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
887 		skip_map_queue = true;
888 	else
889 		skip_map_queue = false;
890 	if (!skip_map_queue) {
891 		r = amdgpu_userq_map_helper(queue);
892 		if (r) {
893 			drm_file_err(uq_mgr->file, "Failed to map Queue\n");
894 			xa_erase(&uq_mgr->userq_xa, qid);
895 			amdgpu_userq_fence_driver_free(queue);
896 			uq_funcs->mqd_destroy(queue);
897 			kfree(queue);
898 			goto unlock;
899 		}
900 	}
901 
902 	queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
903 	if (!queue_name) {
904 		r = -ENOMEM;
905 		goto unlock;
906 	}
907 
908 #if defined(CONFIG_DEBUG_FS)
909 	/* Queue dentry per client to hold MQD information */
910 	queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
911 	debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
912 #endif
913 	amdgpu_userq_init_hang_detect_work(queue);
914 	kfree(queue_name);
915 
916 	args->out.queue_id = qid;
917 	atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
918 
919 unlock:
920 	mutex_unlock(&uq_mgr->userq_mutex);
921 
922 	return r;
923 }
924 
925 static int amdgpu_userq_input_args_validate(struct drm_device *dev,
926 					union drm_amdgpu_userq *args,
927 					struct drm_file *filp)
928 {
929 	struct amdgpu_device *adev = drm_to_adev(dev);
930 
931 	switch (args->in.op) {
932 	case AMDGPU_USERQ_OP_CREATE:
933 		if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
934 				       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
935 			return -EINVAL;
936 		/* Usermode queues are only supported for GFX, Compute and SDMA IPs for now */
937 		if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
938 		    args->in.ip_type != AMDGPU_HW_IP_DMA &&
939 		    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
940 			drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
941 				     args->in.ip_type);
942 			return -EINVAL;
943 		}
944 
945 		if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
946 		    (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
947 		    (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
948 		    !amdgpu_is_tmz(adev)) {
949 			drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
950 			return -EINVAL;
951 		}
952 
953 		if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
954 		    args->in.queue_va == 0 ||
955 		    args->in.queue_size == 0) {
956 			drm_file_err(filp, "invalid userq queue va or size\n");
957 			return -EINVAL;
958 		}
959 
960 		if (!is_power_of_2(args->in.queue_size)) {
961 			drm_file_err(filp, "Queue size must be a power of 2\n");
962 			return -EINVAL;
963 		}
964 
965 		if (args->in.queue_size < AMDGPU_GPU_PAGE_SIZE) {
966 			drm_file_err(filp, "Queue size smaller than AMDGPU_GPU_PAGE_SIZE\n");
967 			return -EINVAL;
968 		}
969 
970 		if (!args->in.wptr_va || !args->in.rptr_va) {
971 			drm_file_err(filp, "invalid userq rptr or wptr va\n");
972 			return -EINVAL;
973 		}
974 		break;
975 	case AMDGPU_USERQ_OP_FREE:
976 		if (args->in.ip_type ||
977 		    args->in.doorbell_handle ||
978 		    args->in.doorbell_offset ||
979 		    args->in.flags ||
980 		    args->in.queue_va ||
981 		    args->in.queue_size ||
982 		    args->in.rptr_va ||
983 		    args->in.wptr_va ||
984 		    args->in.mqd ||
985 		    args->in.mqd_size)
986 			return -EINVAL;
987 		break;
988 	default:
989 		return -EINVAL;
990 	}
991 
992 	return 0;
993 }
994 
995 bool amdgpu_userq_enabled(struct drm_device *dev)
996 {
997 	struct amdgpu_device *adev = drm_to_adev(dev);
998 	int i;
999 
1000 	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
1001 		if (adev->userq_funcs[i])
1002 			return true;
1003 	}
1004 
1005 	return false;
1006 }
1007 
1008 int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
1009 		       struct drm_file *filp)
1010 {
1011 	union drm_amdgpu_userq *args = data;
1012 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
1013 	struct amdgpu_usermode_queue *queue;
1014 	int r = 0;
1015 
1016 	if (!amdgpu_userq_enabled(dev))
1017 		return -ENOTSUPP;
1018 
1019 	if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
1020 		return -EINVAL;
1021 
1022 	switch (args->in.op) {
1023 	case AMDGPU_USERQ_OP_CREATE:
1024 		r = amdgpu_userq_create(filp, args);
1025 		if (r)
1026 			drm_file_err(filp, "Failed to create usermode queue\n");
1027 		break;
1028 
1029 	case AMDGPU_USERQ_OP_FREE: {
1030 		xa_lock(&fpriv->userq_mgr.userq_xa);
1031 		queue = __xa_erase(&fpriv->userq_mgr.userq_xa, args->in.queue_id);
1032 		xa_unlock(&fpriv->userq_mgr.userq_xa);
1033 		if (!queue)
1034 			return -ENOENT;
1035 
1036 		amdgpu_userq_put(queue);
1037 		break;
1038 	}
1039 
1040 	default:
1041 		drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
1042 		return -EINVAL;
1043 	}
1044 
1045 	return r;
1046 }
1047 
1048 static int
1049 amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
1050 {
1051 	struct amdgpu_usermode_queue *queue;
1052 	unsigned long queue_id;
1053 	int ret = 0, r;
1054 
1055 	/* Resume all the queues for this process */
1056 	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1057 		queue = amdgpu_userq_get(uq_mgr, queue_id);
1058 		if (!queue)
1059 			continue;
1060 
1061 		if (!amdgpu_userq_buffer_vas_mapped(queue)) {
1062 			drm_file_err(uq_mgr->file,
1063 				     "trying restore queue without va mapping\n");
1064 			queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
1065 			amdgpu_userq_put(queue);
1066 			continue;
1067 		}
1068 
1069 		r = amdgpu_userq_restore_helper(queue);
1070 		if (r)
1071 			ret = r;
1072 
1073 		amdgpu_userq_put(queue);
1074 	}
1075 
1076 	if (ret)
1077 		drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
1078 	return ret;
1079 }
1080 
1081 static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
1082 {
1083 	struct ttm_operation_ctx ctx = { false, false };
1084 
1085 	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1086 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1087 }
1088 
1089 /* Handle all BOs on the invalidated list, validate them and update the PTs */
1090 static int
1091 amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
1092 			 struct amdgpu_vm *vm)
1093 {
1094 	struct ttm_operation_ctx ctx = { false, false };
1095 	struct amdgpu_bo_va *bo_va;
1096 	struct amdgpu_bo *bo;
1097 	int ret;
1098 
1099 	spin_lock(&vm->status_lock);
1100 	while (!list_empty(&vm->invalidated)) {
1101 		bo_va = list_first_entry(&vm->invalidated,
1102 					 struct amdgpu_bo_va,
1103 					 base.vm_status);
1104 		spin_unlock(&vm->status_lock);
1105 
1106 		bo = bo_va->base.bo;
1107 		ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
1108 		if (unlikely(ret))
1109 			return ret;
1110 
1111 		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1112 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1113 		if (ret)
1114 			return ret;
1115 
1116 		/* This moves the bo_va to the done list */
1117 		ret = amdgpu_vm_bo_update(adev, bo_va, false);
1118 		if (ret)
1119 			return ret;
1120 
1121 		spin_lock(&vm->status_lock);
1122 	}
1123 	spin_unlock(&vm->status_lock);
1124 
1125 	return 0;
1126 }
1127 
1128 /* Make sure the whole VM is ready to be used */
1129 static int
1130 amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
1131 {
1132 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1133 	bool invalidated = false, new_addition = false;
1134 	struct ttm_operation_ctx ctx = { true, false };
1135 	struct amdgpu_device *adev = uq_mgr->adev;
1136 	struct amdgpu_hmm_range *range;
1137 	struct amdgpu_vm *vm = &fpriv->vm;
1138 	unsigned long key, tmp_key;
1139 	struct amdgpu_bo_va *bo_va;
1140 	struct amdgpu_bo *bo;
1141 	struct drm_exec exec;
1142 	struct xarray xa;
1143 	int ret;
1144 
1145 	xa_init(&xa);
1146 
1147 retry_lock:
1148 	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
1149 	drm_exec_until_all_locked(&exec) {
1150 		ret = amdgpu_vm_lock_pd(vm, &exec, 1);
1151 		drm_exec_retry_on_contention(&exec);
1152 		if (unlikely(ret))
1153 			goto unlock_all;
1154 
1155 		ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
1156 		drm_exec_retry_on_contention(&exec);
1157 		if (unlikely(ret))
1158 			goto unlock_all;
1159 
1160 		/* This validates PDs, PTs and per VM BOs */
1161 		ret = amdgpu_vm_validate(adev, vm, NULL,
1162 					 amdgpu_userq_validate_vm,
1163 					 NULL);
1164 		if (unlikely(ret))
1165 			goto unlock_all;
1166 
1167 		/* This locks and validates the remaining evicted BOs */
1168 		ret = amdgpu_userq_bo_validate(adev, &exec, vm);
1169 		drm_exec_retry_on_contention(&exec);
1170 		if (unlikely(ret))
1171 			goto unlock_all;
1172 	}
1173 
1174 	if (invalidated) {
1175 		xa_for_each(&xa, tmp_key, range) {
1176 			bo = range->bo;
1177 			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1178 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1179 			if (ret)
1180 				goto unlock_all;
1181 
1182 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
1183 
1184 			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
1185 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1186 			if (ret)
1187 				goto unlock_all;
1188 		}
1189 		invalidated = false;
1190 	}
1191 
1192 	ret = amdgpu_vm_handle_moved(adev, vm, NULL);
1193 	if (ret)
1194 		goto unlock_all;
1195 
1196 	key = 0;
1197 	/* Validate User Ptr BOs */
1198 	list_for_each_entry(bo_va, &vm->done, base.vm_status) {
1199 		bo = bo_va->base.bo;
1200 		if (!bo)
1201 			continue;
1202 
1203 		if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
1204 			continue;
1205 
1206 		range = xa_load(&xa, key);
1207 		if (range && range->bo != bo) {
1208 			xa_erase(&xa, key);
1209 			amdgpu_hmm_range_free(range);
1210 			range = NULL;
1211 		}
1212 
1213 		if (!range) {
1214 			range = amdgpu_hmm_range_alloc(bo);
1215 			if (!range) {
1216 				ret = -ENOMEM;
1217 				goto unlock_all;
1218 			}
1219 
1220 			xa_store(&xa, key, range, GFP_KERNEL);
1221 			new_addition = true;
1222 		}
1223 		key++;
1224 	}
1225 
1226 	if (new_addition) {
1227 		drm_exec_fini(&exec);
1228 		xa_for_each(&xa, tmp_key, range) {
1229 			if (!range)
1230 				continue;
1231 			bo = range->bo;
1232 			ret = amdgpu_ttm_tt_get_user_pages(bo, range);
1233 			if (ret)
1234 				goto unlock_all;
1235 		}
1236 
1237 		invalidated = true;
1238 		new_addition = false;
1239 		goto retry_lock;
1240 	}
1241 
1242 	ret = amdgpu_vm_update_pdes(adev, vm, false);
1243 	if (ret)
1244 		goto unlock_all;
1245 
1246 	/*
1247 	 * We need to wait for all VM updates to finish before restarting the
1248 	 * queues. Using the done list like that is now ok since everything is
1249 	 * locked in place.
1250 	 */
1251 	list_for_each_entry(bo_va, &vm->done, base.vm_status)
1252 		dma_fence_wait(bo_va->last_pt_update, false);
1253 	dma_fence_wait(vm->last_update, false);
1254 
1255 	ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
1256 	if (ret)
1257 		drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
1258 
1259 unlock_all:
1260 	drm_exec_fini(&exec);
1261 	xa_for_each(&xa, tmp_key, range) {
1262 		if (!range)
1263 			continue;
1264 		bo = range->bo;
1265 		amdgpu_hmm_range_free(range);
1266 	}
1267 	xa_destroy(&xa);
1268 	return ret;
1269 }
1270 
1271 static void amdgpu_userq_restore_worker(struct work_struct *work)
1272 {
1273 	struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
1274 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1275 	int ret;
1276 
1277 	flush_delayed_work(&fpriv->evf_mgr.suspend_work);
1278 
1279 	mutex_lock(&uq_mgr->userq_mutex);
1280 
1281 	ret = amdgpu_userq_vm_validate(uq_mgr);
1282 	if (ret) {
1283 		drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
1284 		goto unlock;
1285 	}
1286 
1287 	ret = amdgpu_userq_restore_all(uq_mgr);
1288 	if (ret) {
1289 		drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
1290 		goto unlock;
1291 	}
1292 
1293 unlock:
1294 	mutex_unlock(&uq_mgr->userq_mutex);
1295 }
1296 
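/*
 * Preempt every queue belonging to this process after running hang
 * detection, as part of eviction handling.
 */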
1297 static int
1298 amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
1299 {
1300 	struct amdgpu_usermode_queue *queue;
1301 	unsigned long queue_id;
1302 	int ret = 0, r;
1303 
1304 	amdgpu_userq_detect_and_reset_queues(uq_mgr);
1305 	/* Try to unmap all the queues in this process ctx */
1306 	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1307 		queue = amdgpu_userq_get(uq_mgr, queue_id);
1308 		if (!queue)
1309 			continue;
1310 		r = amdgpu_userq_preempt_helper(queue);
1311 		if (r)
1312 			ret = r;
1313 		amdgpu_userq_put(queue);
1314 	}
1315 
1316 	if (ret)
1317 		drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
1318 	return ret;
1319 }
1320 
1321 void amdgpu_userq_reset_work(struct work_struct *work)
1322 {
1323 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
1324 						  userq_reset_work);
1325 	struct amdgpu_reset_context reset_context;
1326 
1327 	memset(&reset_context, 0, sizeof(reset_context));
1328 
1329 	reset_context.method = AMD_RESET_METHOD_NONE;
1330 	reset_context.reset_req_dev = adev;
1331 	reset_context.src = AMDGPU_RESET_SRC_USERQ;
1332 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
1333 	/*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/
1334 
1335 	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
1336 }
1337 
1338 static int
1339 amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
1340 {
1341 	struct amdgpu_usermode_queue *queue;
1342 	unsigned long queue_id;
1343 	int ret;
1344 
1345 	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1346 		queue = amdgpu_userq_get(uq_mgr, queue_id);
1347 		if (!queue)
1348 			continue;
1349 
1350 		struct dma_fence *f = queue->last_fence;
1351 
1352 		if (!f || dma_fence_is_signaled(f)) {
1353 			amdgpu_userq_put(queue);
1354 			continue;
1355 		}
1356 		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
1357 		if (ret <= 0) {
1358 			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
1359 				     f->context, f->seqno);
1360 			amdgpu_userq_put(queue);
1361 			return -ETIMEDOUT;
1362 		}
1363 		amdgpu_userq_put(queue);
1364 	}
1365 
1366 	return 0;
1367 }
1368 
1369 void
1370 amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
1371 		   struct amdgpu_eviction_fence *ev_fence)
1372 {
1373 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1374 	struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
1375 	struct amdgpu_device *adev = uq_mgr->adev;
1376 	int ret;
1377 
1378 	/* Wait for any pending userqueue fence work to finish */
1379 	ret = amdgpu_userq_wait_for_signal(uq_mgr);
1380 	if (ret)
1381 		dev_err(adev->dev, "Not evicting userqueue, timeout waiting for work\n");
1382 
1383 	ret = amdgpu_userq_evict_all(uq_mgr);
1384 	if (ret)
1385 		dev_err(adev->dev, "Failed to evict userqueue\n");
1386 
1387 	/* Signal current eviction fence */
1388 	amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
1389 
1390 	if (evf_mgr->fd_closing) {
1391 		cancel_delayed_work_sync(&uq_mgr->resume_work);
1392 		return;
1393 	}
1394 
1395 	/* Schedule a resume work */
1396 	schedule_delayed_work(&uq_mgr->resume_work, 0);
1397 }
1398 
1399 int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
1400 			  struct amdgpu_device *adev)
1401 {
1402 	mutex_init(&userq_mgr->userq_mutex);
1403 	xa_init_flags(&userq_mgr->userq_xa, XA_FLAGS_ALLOC);
1404 	userq_mgr->adev = adev;
1405 	userq_mgr->file = file_priv;
1406 
1407 	INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
1408 	return 0;
1409 }
1410 
1411 void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
1412 {
1413 	struct amdgpu_usermode_queue *queue;
1414 	unsigned long queue_id = 0;
1415 
1416 	for (;;) {
1417 		xa_lock(&userq_mgr->userq_xa);
1418 		queue = xa_find(&userq_mgr->userq_xa, &queue_id, ULONG_MAX,
1419 				XA_PRESENT);
1420 		if (queue)
1421 			__xa_erase(&userq_mgr->userq_xa, queue_id);
1422 		xa_unlock(&userq_mgr->userq_xa);
1423 
1424 		if (!queue)
1425 			break;
1426 
1427 		amdgpu_userq_put(queue);
1428 	}
1429 
1430 	xa_destroy(&userq_mgr->userq_xa);
1431 	mutex_destroy(&userq_mgr->userq_mutex);
1432 }
1433 
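/*
 * Device-wide suspend path: stop all user queues. For S0ix the queues
 * are only preempted so they can be restored quickly, otherwise they
 * are fully unmapped from the hardware.
 */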
1434 int amdgpu_userq_suspend(struct amdgpu_device *adev)
1435 {
1436 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1437 	struct amdgpu_usermode_queue *queue;
1438 	struct amdgpu_userq_mgr *uqm;
1439 	unsigned long queue_id;
1440 	int r;
1441 
1442 	if (!ip_mask)
1443 		return 0;
1444 
1445 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1446 		uqm = queue->userq_mgr;
1447 		cancel_delayed_work_sync(&uqm->resume_work);
1448 		guard(mutex)(&uqm->userq_mutex);
1449 		amdgpu_userq_detect_and_reset_queues(uqm);
1450 		if (adev->in_s0ix)
1451 			r = amdgpu_userq_preempt_helper(queue);
1452 		else
1453 			r = amdgpu_userq_unmap_helper(queue);
1454 		if (r)
1455 			return r;
1456 	}
1457 	return 0;
1458 }
1459 
1460 int amdgpu_userq_resume(struct amdgpu_device *adev)
1461 {
1462 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1463 	struct amdgpu_usermode_queue *queue;
1464 	struct amdgpu_userq_mgr *uqm;
1465 	unsigned long queue_id;
1466 	int r;
1467 
1468 	if (!ip_mask)
1469 		return 0;
1470 
1471 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1472 		uqm = queue->userq_mgr;
1473 		guard(mutex)(&uqm->userq_mutex);
1474 		if (adev->in_s0ix)
1475 			r = amdgpu_userq_restore_helper(queue);
1476 		else
1477 			r = amdgpu_userq_map_helper(queue);
1478 		if (r)
1479 			return r;
1480 	}
1481 
1482 	return 0;
1483 }
1484 
1485 int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
1486 						  u32 idx)
1487 {
1488 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1489 	struct amdgpu_usermode_queue *queue;
1490 	struct amdgpu_userq_mgr *uqm;
1491 	unsigned long queue_id;
1492 	int ret = 0, r;
1493 
1494 	/* only need to stop gfx/compute */
1495 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
1496 		return 0;
1497 
1498 	if (adev->userq_halt_for_enforce_isolation)
1499 		dev_warn(adev->dev, "userq scheduling already stopped!\n");
1500 	adev->userq_halt_for_enforce_isolation = true;
1501 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1502 		uqm = queue->userq_mgr;
1503 		cancel_delayed_work_sync(&uqm->resume_work);
1504 		mutex_lock(&uqm->userq_mutex);
1505 		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1506 		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1507 		    (queue->xcp_id == idx)) {
1508 			amdgpu_userq_detect_and_reset_queues(uqm);
1509 			r = amdgpu_userq_preempt_helper(queue);
1510 			if (r)
1511 				ret = r;
1512 		}
1513 		mutex_unlock(&uqm->userq_mutex);
1514 	}
1515 
1516 	return ret;
1517 }
1518 
1519 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
1520 						   u32 idx)
1521 {
1522 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1523 	struct amdgpu_usermode_queue *queue;
1524 	struct amdgpu_userq_mgr *uqm;
1525 	unsigned long queue_id;
1526 	int ret = 0, r;
1527 
1528 	/* only need to restart gfx/compute */
1529 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
1530 		return 0;
1531 
1532 	if (!adev->userq_halt_for_enforce_isolation)
1533 		dev_warn(adev->dev, "userq scheduling already started!\n");
1534 	adev->userq_halt_for_enforce_isolation = false;
1535 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1536 		uqm = queue->userq_mgr;
1537 		mutex_lock(&uqm->userq_mutex);
1538 		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1539 		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1540 		    (queue->xcp_id == idx)) {
1541 			r = amdgpu_userq_restore_helper(queue);
1542 			if (r)
1543 				ret = r;
1544 		}
1545 		mutex_unlock(&uqm->userq_mutex);
1546 	}
1547 
1548 	return ret;
1549 }
1550 
1551 int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
1552 				       struct amdgpu_bo_va_mapping *mapping,
1553 				       uint64_t saddr)
1554 {
1555 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1556 	struct amdgpu_bo_va *bo_va = mapping->bo_va;
1557 	struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
1558 	int ret = 0;
1559 
1560 	if (!ip_mask)
1561 		return 0;
1562 
1563 	dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr);
1564 	/*
1565 	 * The userq VA mapping reservation should include the eviction fence.
1566 	 * If the eviction fence can't be signaled during unmapping, the
1567 	 * driver warns to flag this improper unmap of the userq VA.
1568 	 * Note: the eviction fence may be attached to different BOs, and this
1569 	 * unmap only covers one kind of userq VA, so at this point assume
1570 	 * the eviction fence is always unsignaled.
1571 	 */
1572 	if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
1573 		ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
1574 					    MAX_SCHEDULE_TIMEOUT);
1575 		if (ret <= 0)
1576 			return -EBUSY;
1577 	}
1578 
1579 	return 0;
1580 }
1581 
1582 void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
1583 {
1584 	const struct amdgpu_userq_funcs *userq_funcs;
1585 	struct amdgpu_usermode_queue *queue;
1586 	struct amdgpu_userq_mgr *uqm;
1587 	unsigned long queue_id;
1588 
1589 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1590 		uqm = queue->userq_mgr;
1591 		cancel_delayed_work_sync(&uqm->resume_work);
1592 		if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
1593 			amdgpu_userq_wait_for_last_fence(queue);
1594 			userq_funcs = adev->userq_funcs[queue->queue_type];
1595 			userq_funcs->unmap(queue);
1596 			/* just mark all queues as hung at this point.
1597 			 * if unmap succeeds, we could map again
1598 			 * in amdgpu_userq_post_reset() if vram is not lost
1599 			 */
1600 			queue->state = AMDGPU_USERQ_STATE_HUNG;
1601 			amdgpu_userq_fence_driver_force_completion(queue);
1602 		}
1603 	}
1604 }
1605 
1606 int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost)
1607 {
1608 	/* Queues marked AMDGPU_USERQ_STATE_HUNG during pre-reset
1609 	 * can be mapped again at this point and continue
1610 	 * if vram is not lost.
1611 	 */
1612 	struct amdgpu_usermode_queue *queue;
1613 	const struct amdgpu_userq_funcs *userq_funcs;
1614 	unsigned long queue_id;
1615 	int r = 0;
1616 
1617 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1618 		if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
1619 			userq_funcs = adev->userq_funcs[queue->queue_type];
1620 			/* Re-map queue */
1621 			r = userq_funcs->map(queue);
1622 			if (r) {
1623 				dev_err(adev->dev, "Failed to remap queue %lu\n", queue_id);
1624 				continue;
1625 			}
1626 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
1627 		}
1628 	}
1629 
1630 	return r;
1631 }
1632