xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c (revision fc2591175507709191c2010a7eb466837496750d)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <drm/drm_auth.h>
26 #include <drm/drm_exec.h>
27 #include <linux/pm_runtime.h>
28 #include <drm/drm_drv.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_reset.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_userq.h"
34 #include "amdgpu_hmm.h"
35 #include "amdgpu_userq_fence.h"
36 
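/*
 * amdgpu_userq_get_supported_ip_mask() - build the user-queue capability mask.
 *
 * Returns a bitmask with bit (1 << AMDGPU_HW_IP_*) set for every IP type that
 * has user-queue functions registered in adev->userq_funcs.  An illustrative
 * (hypothetical) caller-side check could look like:
 *
 *	if (amdgpu_userq_get_supported_ip_mask(adev) & (1 << AMDGPU_HW_IP_GFX))
 *		gfx_userq_supported = true;
 */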
37 u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
38 {
39 	int i;
40 	u32 userq_ip_mask = 0;
41 
42 	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
43 		if (adev->userq_funcs[i])
44 			userq_ip_mask |= (1 << i);
45 	}
46 
47 	return userq_ip_mask;
48 }
49 
50 static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev,
51 				enum amdgpu_ring_type ring_type, int reset_type)
52 {
54 	if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX)
55 		return false;
56 
57 	switch (ring_type) {
58 	case AMDGPU_RING_TYPE_GFX:
59 		if (adev->gfx.gfx_supported_reset & reset_type)
60 			return true;
61 		break;
62 	case AMDGPU_RING_TYPE_COMPUTE:
63 		if (adev->gfx.compute_supported_reset & reset_type)
64 			return true;
65 		break;
66 	case AMDGPU_RING_TYPE_SDMA:
67 		if (adev->sdma.supported_reset & reset_type)
68 			return true;
69 		break;
70 	case AMDGPU_RING_TYPE_VCN_DEC:
71 	case AMDGPU_RING_TYPE_VCN_ENC:
72 		if (adev->vcn.supported_reset & reset_type)
73 			return true;
74 		break;
75 	case AMDGPU_RING_TYPE_VCN_JPEG:
76 		if (adev->jpeg.supported_reset & reset_type)
77 			return true;
78 		break;
79 	default:
80 		break;
81 	}
82 	return false;
83 }
84 
85 static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev)
86 {
87 	if (amdgpu_device_should_recover_gpu(adev)) {
88 		amdgpu_reset_domain_schedule(adev->reset_domain,
89 					     &adev->userq_reset_work);
90 		/* Wait for the reset job to complete */
91 		flush_work(&adev->userq_reset_work);
92 	}
93 }
94 
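/*
 * Walk the compute, GFX and SDMA queue types and, where per-queue reset is
 * supported and this manager owns queues of that type, call the IP specific
 * detect_and_reset handler.  If a handler reports a failure, escalate to a
 * full GPU reset via amdgpu_userq_gpu_reset().  Callers must hold
 * uq_mgr->userq_mutex.
 */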
95 static int
96 amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr)
97 {
98 	struct amdgpu_device *adev = uq_mgr->adev;
99 	const int queue_types[] = {
100 		AMDGPU_RING_TYPE_COMPUTE,
101 		AMDGPU_RING_TYPE_GFX,
102 		AMDGPU_RING_TYPE_SDMA
103 	};
104 	const int num_queue_types = ARRAY_SIZE(queue_types);
105 	bool gpu_reset = false;
106 	int r = 0;
107 	int i;
108 
109 	/* Warn if the caller does not hold the userq manager mutex */
110 	WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex));
111 
112 	if (unlikely(adev->debug_disable_gpu_ring_reset)) {
113 		dev_err(adev->dev, "userq reset disabled by debug mask\n");
114 		return 0;
115 	}
116 
117 	/*
118 	 * If GPU recovery feature is disabled system-wide,
119 	 * skip all reset detection logic
120 	 */
121 	if (!amdgpu_gpu_recovery)
122 		return 0;
123 
124 	/*
125 	 * Iterate through all queue types to detect and reset problematic queues
126 	 * Process each queue type in the defined order
127 	 */
128 	for (i = 0; i < num_queue_types; i++) {
129 		int ring_type = queue_types[i];
130 		const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type];
131 
132 		if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE))
133 			continue;
134 
135 		if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 &&
136 		    funcs && funcs->detect_and_reset) {
137 			r = funcs->detect_and_reset(adev, ring_type);
138 			if (r) {
139 				gpu_reset = true;
140 				break;
141 			}
142 		}
143 	}
144 
145 	if (gpu_reset)
146 		amdgpu_userq_gpu_reset(adev);
147 
148 	return r;
149 }
150 
151 static void amdgpu_userq_hang_detect_work(struct work_struct *work)
152 {
153 	struct amdgpu_usermode_queue *queue = container_of(work,
154 							  struct amdgpu_usermode_queue,
155 							  hang_detect_work.work);
156 	struct dma_fence *fence;
157 	struct amdgpu_userq_mgr *uq_mgr;
158 
159 	if (!queue->userq_mgr)
160 		return;
161 
162 	uq_mgr = queue->userq_mgr;
163 	fence = READ_ONCE(queue->hang_detect_fence);
164 	/* Fence already signaled – no action needed */
165 	if (!fence || dma_fence_is_signaled(fence))
166 		return;
167 
168 	mutex_lock(&uq_mgr->userq_mutex);
169 	amdgpu_userq_detect_and_reset_queues(uq_mgr);
170 	mutex_unlock(&uq_mgr->userq_mutex);
171 }
172 
173 /*
174  * Start hang detection for a user queue fence. A delayed work will be scheduled
175  * to check if the fence is still pending after the timeout period.
176  */
177 void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue)
178 {
179 	struct amdgpu_device *adev;
180 	unsigned long timeout_ms;
181 
182 	if (!queue || !queue->userq_mgr || !queue->userq_mgr->adev)
183 		return;
184 
185 	adev = queue->userq_mgr->adev;
186 	/* Determine timeout based on queue type */
187 	switch (queue->queue_type) {
188 	case AMDGPU_RING_TYPE_GFX:
189 		timeout_ms = adev->gfx_timeout;
190 		break;
191 	case AMDGPU_RING_TYPE_COMPUTE:
192 		timeout_ms = adev->compute_timeout;
193 		break;
194 	case AMDGPU_RING_TYPE_SDMA:
195 		timeout_ms = adev->sdma_timeout;
196 		break;
197 	default:
198 		timeout_ms = adev->gfx_timeout;
199 		break;
200 	}
201 
202 	/* Store the fence to monitor and schedule hang detection */
203 	WRITE_ONCE(queue->hang_detect_fence, queue->last_fence);
204 	schedule_delayed_work(&queue->hang_detect_work,
205 		     msecs_to_jiffies(timeout_ms));
206 }
207 
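/*
 * Hang-detection lifecycle (a sketch of the call order used in this file;
 * amdgpu_userq_start_hang_detect_work() itself is presumably invoked from the
 * userq fence path once a new last_fence is installed):
 *
 *	amdgpu_userq_init_hang_detect_work(queue);	at queue creation
 *	amdgpu_userq_start_hang_detect_work(queue);	when a fence starts being tracked
 *	cancel_delayed_work_sync(&queue->hang_detect_work);	at queue destroy
 *
 * The delayed work only acts if queue->hang_detect_fence is still unsignaled
 * when the per-IP timeout expires.
 */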
208 static void amdgpu_userq_init_hang_detect_work(struct amdgpu_usermode_queue *queue)
209 {
210 	INIT_DELAYED_WORK(&queue->hang_detect_work, amdgpu_userq_hang_detect_work);
211 	queue->hang_detect_fence = NULL;
212 }
213 
214 static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
215 					   struct amdgpu_bo_va_mapping *va_map, u64 addr)
216 {
217 	struct amdgpu_userq_va_cursor *va_cursor;
219 
220 	va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
221 	if (!va_cursor)
222 		return -ENOMEM;
223 
224 	INIT_LIST_HEAD(&va_cursor->list);
225 	va_cursor->gpu_addr = addr;
226 	atomic_set(&va_map->bo_va->userq_va_mapped, 1);
227 	list_add(&va_cursor->list, &queue->userq_va_list);
228 
229 	return 0;
230 }
231 
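/*
 * Validate that a queue-supplied GPU VA (ring buffer, rptr or wptr) is backed
 * by a VM mapping large enough for the expected size.  The address is masked
 * against the GMC hole and converted to GPU page units before the mapping
 * lookup; on success the VA is recorded on queue->userq_va_list so the
 * restore path can later verify the mapping still exists.  See
 * amdgpu_userq_create() for the three call sites.
 */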
232 int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
233 				   struct amdgpu_usermode_queue *queue,
234 				   u64 addr, u64 expected_size)
235 {
236 	struct amdgpu_bo_va_mapping *va_map;
237 	struct amdgpu_vm *vm = queue->vm;
238 	u64 user_addr;
239 	u64 size;
240 	int r = 0;
241 
242 	user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
243 	size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
244 
245 	r = amdgpu_bo_reserve(vm->root.bo, false);
246 	if (r)
247 		return r;
248 
249 	va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
250 	if (!va_map) {
251 		r = -EINVAL;
252 		goto out_err;
253 	}
254 	/* Validate that the userq buffer lies entirely within the VM mapping range */
255 	if (user_addr >= va_map->start &&
256 	    va_map->last - user_addr + 1 >= size) {
257 		amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
258 		amdgpu_bo_unreserve(vm->root.bo);
259 		return 0;
260 	}
261 
262 	r = -EINVAL;
263 out_err:
264 	amdgpu_bo_unreserve(vm->root.bo);
265 	return r;
266 }
267 
268 static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
269 {
270 	struct amdgpu_bo_va_mapping *mapping;
271 	bool r;
272 
273 	if (amdgpu_bo_reserve(vm->root.bo, false))
274 		return false;
275 
276 	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
277 	if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
278 		r = true;
279 	else
280 		r = false;
281 	amdgpu_bo_unreserve(vm->root.bo);
282 
283 	return r;
284 }
285 
286 static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
287 {
288 	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
289 	int r = 0;
290 
291 	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
292 		r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
293 		dev_dbg(queue->userq_mgr->adev->dev,
294 			"validate the userq mapping:%p va:%llx r:%d\n",
295 			queue, va_cursor->gpu_addr, r);
296 	}
297 
298 	if (r != 0)
299 		return true;
300 
301 	return false;
302 }
303 
304 static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
305 					    struct amdgpu_userq_va_cursor *va_cursor)
306 {
307 	atomic_set(&mapping->bo_va->userq_va_mapped, 0);
308 	list_del(&va_cursor->list);
309 	kfree(va_cursor);
310 }
311 
312 static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
313 						struct amdgpu_usermode_queue *queue)
314 {
315 	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
316 	struct amdgpu_bo_va_mapping *mapping;
317 	int r;
318 
319 	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
320 	if (r)
321 		return r;
322 
323 	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
324 		mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
325 		if (!mapping) {
326 			r = -EINVAL;
327 			goto err;
328 		}
329 		dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
330 			queue, va_cursor->gpu_addr);
331 		amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
332 	}
333 err:
334 	amdgpu_bo_unreserve(queue->vm->root.bo);
335 	return r;
336 }
337 
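/*
 * Queue state transitions driven by the helpers below (derived from the code,
 * kept here as a quick reference):
 *
 *	UNMAPPED  --map-->      MAPPED
 *	MAPPED    --preempt-->  PREEMPTED   (HUNG on failure)
 *	PREEMPTED --restore-->  MAPPED      (HUNG on failure)
 *	MAPPED/PREEMPTED --unmap--> UNMAPPED (HUNG on failure)
 *
 * A failure in preempt, unmap or map additionally triggers
 * amdgpu_userq_detect_and_reset_queues().
 */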
338 static int amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue *queue)
339 {
340 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
341 	struct amdgpu_device *adev = uq_mgr->adev;
342 	const struct amdgpu_userq_funcs *userq_funcs =
343 		adev->userq_funcs[queue->queue_type];
344 	bool found_hung_queue = false;
345 	int r = 0;
346 
347 	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
348 		r = userq_funcs->preempt(queue);
349 		if (r) {
350 			queue->state = AMDGPU_USERQ_STATE_HUNG;
351 			found_hung_queue = true;
352 		} else {
353 			queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
354 		}
355 	}
356 
357 	if (found_hung_queue)
358 		amdgpu_userq_detect_and_reset_queues(uq_mgr);
359 
360 	return r;
361 }
362 
363 static int amdgpu_userq_restore_helper(struct amdgpu_usermode_queue *queue)
364 {
365 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
366 	struct amdgpu_device *adev = uq_mgr->adev;
367 	const struct amdgpu_userq_funcs *userq_funcs =
368 		adev->userq_funcs[queue->queue_type];
369 	int r = 0;
370 
371 	if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
372 		r = userq_funcs->restore(queue);
373 		if (r) {
374 			queue->state = AMDGPU_USERQ_STATE_HUNG;
375 		} else {
376 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
377 		}
378 	}
379 
380 	return r;
381 }
382 
383 static int amdgpu_userq_unmap_helper(struct amdgpu_usermode_queue *queue)
384 {
385 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
386 	struct amdgpu_device *adev = uq_mgr->adev;
387 	const struct amdgpu_userq_funcs *userq_funcs =
388 		adev->userq_funcs[queue->queue_type];
389 	bool found_hung_queue = false;
390 	int r = 0;
391 
392 	if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
393 		(queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
394 		r = userq_funcs->unmap(queue);
395 		if (r) {
396 			queue->state = AMDGPU_USERQ_STATE_HUNG;
397 			found_hung_queue = true;
398 		} else {
399 			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
400 		}
401 	}
402 
403 	if (found_hung_queue)
404 		amdgpu_userq_detect_and_reset_queues(uq_mgr);
405 
406 	return r;
407 }
408 
409 static int amdgpu_userq_map_helper(struct amdgpu_usermode_queue *queue)
410 {
411 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
412 	struct amdgpu_device *adev = uq_mgr->adev;
413 	const struct amdgpu_userq_funcs *userq_funcs =
414 		adev->userq_funcs[queue->queue_type];
415 	int r = 0;
416 
417 	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
418 		r = userq_funcs->map(queue);
419 		if (r) {
420 			queue->state = AMDGPU_USERQ_STATE_HUNG;
421 			amdgpu_userq_detect_and_reset_queues(uq_mgr);
422 		} else {
423 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
424 		}
425 	}
426 
427 	return r;
428 }
429 
430 static int amdgpu_userq_wait_for_last_fence(struct amdgpu_usermode_queue *queue)
431 {
432 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
433 	struct dma_fence *f = queue->last_fence;
434 	int ret = 0;
435 
436 	if (f && !dma_fence_is_signaled(f)) {
437 		ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT);
438 		if (ret <= 0) {
439 			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
440 				     f->context, f->seqno);
441 			queue->state = AMDGPU_USERQ_STATE_HUNG;
442 			return -ETIME;
443 		}
444 	}
445 
446 	return ret;
447 }
448 
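/*
 * Final teardown of a queue: release the VA list, destroy the MQD, free the
 * fence driver and drop the doorbell XArray entry.  Runs under the
 * reset_domain read lock so it cannot race with a mode-1 reset.
 */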
449 static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue)
450 {
451 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
452 	struct amdgpu_device *adev = uq_mgr->adev;
453 	const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
454 
455 	/* Wait for mode-1 reset to complete */
456 	down_read(&adev->reset_domain->sem);
457 
458 	/* Release everything the queue still owns. */
459 	amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
460 	uq_funcs->mqd_destroy(queue);
461 	amdgpu_userq_fence_driver_free(queue);
462 	/* Use interrupt-safe locking since IRQ handlers may access these XArrays */
463 	xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
464 	queue->userq_mgr = NULL;
465 	list_del(&queue->userq_va_list);
466 	kfree(queue);
467 
468 	up_read(&adev->reset_domain->sem);
469 }
470 
471 void
472 amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
473 			     struct amdgpu_eviction_fence_mgr *evf_mgr)
474 {
475 	struct dma_fence *ev_fence;
476 
477 retry:
478 	/* Flush any pending resume work to create ev_fence */
479 	flush_delayed_work(&uq_mgr->resume_work);
480 
481 	mutex_lock(&uq_mgr->userq_mutex);
482 	ev_fence = amdgpu_evf_mgr_get_fence(evf_mgr);
483 	if (dma_fence_is_signaled(ev_fence)) {
484 		dma_fence_put(ev_fence);
485 		mutex_unlock(&uq_mgr->userq_mutex);
486 		/*
487 		 * Looks like there was no pending resume work,
488 		 * add one now to create a valid eviction fence
489 		 */
490 		schedule_delayed_work(&uq_mgr->resume_work, 0);
491 		goto retry;
492 	}
493 	dma_fence_put(ev_fence);
494 }
495 
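/*
 * Allocate a kernel-owned, CPU-accessible GTT object for queue metadata, pin
 * it into the GART and kmap it, publishing both userq_obj->gpu_addr and
 * userq_obj->cpu_ptr.  A minimal usage sketch (the MQD object name and size
 * below are only examples, the per-IP code picks the real ones):
 *
 *	r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_size);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
 */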
496 int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
497 			       struct amdgpu_userq_obj *userq_obj,
498 			       int size)
499 {
500 	struct amdgpu_device *adev = uq_mgr->adev;
501 	struct amdgpu_bo_param bp;
502 	int r;
503 
504 	memset(&bp, 0, sizeof(bp));
505 	bp.byte_align = PAGE_SIZE;
506 	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
507 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
508 		   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
509 	bp.type = ttm_bo_type_kernel;
510 	bp.size = size;
511 	bp.resv = NULL;
512 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
513 
514 	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
515 	if (r) {
516 		drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
517 		return r;
518 	}
519 
520 	r = amdgpu_bo_reserve(userq_obj->obj, true);
521 	if (r) {
522 		drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
523 		goto free_obj;
524 	}
525 
526 	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
527 	if (r) {
528 		drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
529 		goto unresv;
530 	}
531 
532 	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
533 	if (r) {
534 		drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
535 		goto unresv;
536 	}
537 
538 	userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
539 	amdgpu_bo_unreserve(userq_obj->obj);
540 	memset(userq_obj->cpu_ptr, 0, size);
541 	return 0;
542 
543 unresv:
544 	amdgpu_bo_unreserve(userq_obj->obj);
545 
546 free_obj:
547 	amdgpu_bo_unref(&userq_obj->obj);
548 	return r;
549 }
550 
551 void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
552 				 struct amdgpu_userq_obj *userq_obj)
553 {
554 	amdgpu_bo_kunmap(userq_obj->obj);
555 	amdgpu_bo_unref(&userq_obj->obj);
556 }
557 
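/*
 * Translate a userspace doorbell (GEM handle + page offset) into an absolute
 * doorbell index on the BAR.  The doorbell BO is pinned here and unpinned in
 * amdgpu_userq_destroy().  On failure a negative errno is returned cast to
 * uint64_t, which is why the caller compares against (uint64_t)-EINVAL.
 */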
558 uint64_t
559 amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
560 				struct amdgpu_db_info *db_info,
561 				struct drm_file *filp)
562 {
563 	uint64_t index;
564 	struct drm_gem_object *gobj;
565 	struct amdgpu_userq_obj *db_obj = db_info->db_obj;
566 	int r, db_size;
567 
568 	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
569 	if (gobj == NULL) {
570 		drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
571 		return -EINVAL;
572 	}
573 
574 	db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
575 	drm_gem_object_put(gobj);
576 
577 	r = amdgpu_bo_reserve(db_obj->obj, true);
578 	if (r) {
579 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
580 		goto unref_bo;
581 	}
582 
583 	/* Pin the BO before generating the index, unpin in queue destroy */
584 	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
585 	if (r) {
586 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
587 		goto unresv_bo;
588 	}
589 
590 	switch (db_info->queue_type) {
591 	case AMDGPU_HW_IP_GFX:
592 	case AMDGPU_HW_IP_COMPUTE:
593 	case AMDGPU_HW_IP_DMA:
594 		db_size = sizeof(u64);
595 		break;
596 	default:
597 		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
598 			     db_info->queue_type);
599 		r = -EINVAL;
600 		goto unpin_bo;
601 	}
602 
603 	index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
604 					     db_info->doorbell_offset, db_size);
605 	drm_dbg_driver(adev_to_drm(uq_mgr->adev),
606 		       "[Usermode queues] doorbell index=%lld\n", index);
607 	amdgpu_bo_unreserve(db_obj->obj);
608 	return index;
609 
610 unpin_bo:
611 	amdgpu_bo_unpin(db_obj->obj);
612 unresv_bo:
613 	amdgpu_bo_unreserve(db_obj->obj);
614 unref_bo:
615 	amdgpu_bo_unref(&db_obj->obj);
616 	return r;
617 }
618 
619 static int
620 amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue)
621 {
622 	struct amdgpu_device *adev = uq_mgr->adev;
623 	int r = 0;
624 
625 	cancel_delayed_work_sync(&uq_mgr->resume_work);
626 
627 	/* Cancel any pending hang detection work and cleanup */
628 	cancel_delayed_work_sync(&queue->hang_detect_work);
629 
630 	mutex_lock(&uq_mgr->userq_mutex);
631 	queue->hang_detect_fence = NULL;
632 	amdgpu_userq_wait_for_last_fence(queue);
633 
634 	r = amdgpu_bo_reserve(queue->db_obj.obj, true);
635 	if (!r) {
636 		amdgpu_bo_unpin(queue->db_obj.obj);
637 		amdgpu_bo_unreserve(queue->db_obj.obj);
638 	}
639 	amdgpu_bo_unref(&queue->db_obj.obj);
640 
641 	r = amdgpu_bo_reserve(queue->wptr_obj.obj, true);
642 	if (!r) {
643 		amdgpu_bo_unpin(queue->wptr_obj.obj);
644 		amdgpu_bo_unreserve(queue->wptr_obj.obj);
645 	}
646 	amdgpu_bo_unref(&queue->wptr_obj.obj);
647 
648 	atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
649 #if defined(CONFIG_DEBUG_FS)
650 	debugfs_remove_recursive(queue->debugfs_queue);
651 #endif
652 	amdgpu_userq_detect_and_reset_queues(uq_mgr);
653 	r = amdgpu_userq_unmap_helper(queue);
654 	/* TODO: a userq HW unmap error requires a reset */
655 	if (unlikely(queue->state != AMDGPU_USERQ_STATE_UNMAPPED)) {
656 		drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a HW mapping userq\n");
657 		queue->state = AMDGPU_USERQ_STATE_HUNG;
658 	}
659 	amdgpu_userq_cleanup(queue);
660 	mutex_unlock(&uq_mgr->userq_mutex);
661 
662 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
663 
664 	return r;
665 }
666 
667 static void amdgpu_userq_kref_destroy(struct kref *kref)
668 {
669 	int r;
670 	struct amdgpu_usermode_queue *queue =
671 		container_of(kref, struct amdgpu_usermode_queue, refcount);
672 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
673 
674 	r = amdgpu_userq_destroy(uq_mgr, queue);
675 	if (r)
676 		drm_file_err(uq_mgr->file, "Failed to destroy usermode queue %d\n", r);
677 }
678 
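/*
 * Queue lookup and reference counting: amdgpu_userq_get() returns the queue
 * with an extra reference taken (or NULL), amdgpu_userq_put() drops it, and
 * the final put destroys the queue via amdgpu_userq_kref_destroy().  The
 * iteration pattern used later in this file:
 *
 *	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
 *		queue = amdgpu_userq_get(uq_mgr, queue_id);
 *		if (!queue)
 *			continue;
 *		...
 *		amdgpu_userq_put(queue);
 *	}
 */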
679 struct amdgpu_usermode_queue *amdgpu_userq_get(struct amdgpu_userq_mgr *uq_mgr, u32 qid)
680 {
681 	struct amdgpu_usermode_queue *queue;
682 
683 	xa_lock(&uq_mgr->userq_xa);
684 	queue = xa_load(&uq_mgr->userq_xa, qid);
685 	if (queue)
686 		kref_get(&queue->refcount);
687 	xa_unlock(&uq_mgr->userq_xa);
688 
689 	return queue;
690 }
691 
692 void amdgpu_userq_put(struct amdgpu_usermode_queue *queue)
693 {
694 	if (queue)
695 		kref_put(&queue->refcount, amdgpu_userq_kref_destroy);
696 }
697 
698 static int amdgpu_userq_priority_permit(struct drm_file *filp,
699 					int priority)
700 {
701 	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
702 		return 0;
703 
704 	if (capable(CAP_SYS_NICE))
705 		return 0;
706 
707 	if (drm_is_current_master(filp))
708 		return 0;
709 
710 	return -EACCES;
711 }
712 
713 static int
714 amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
715 {
716 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
717 	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
718 	struct amdgpu_device *adev = uq_mgr->adev;
719 	const struct amdgpu_userq_funcs *uq_funcs;
720 	struct amdgpu_usermode_queue *queue;
721 	struct amdgpu_db_info db_info;
722 	bool skip_map_queue;
723 	u32 qid;
724 	uint64_t index;
725 	int r = 0;
726 	int priority =
727 		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
728 		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
729 
730 	r = amdgpu_userq_priority_permit(filp, priority);
731 	if (r)
732 		return r;
733 
734 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
735 	if (r < 0) {
736 		drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
737 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
738 		return r;
739 	}
740 
741 	/*
742 	 * There could be a situation that we are creating a new queue while
743 	 * the other queues under this UQ_mgr are suspended. So if there is any
744 	 * resume work pending, wait for it to get done.
745 	 *
746 	 * This will also make sure we have a valid eviction fence ready to be used.
747 	 */
748 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
749 
750 	uq_funcs = adev->userq_funcs[args->in.ip_type];
751 	if (!uq_funcs) {
752 		drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
753 			     args->in.ip_type);
754 		r = -EINVAL;
755 		goto unlock;
756 	}
757 
758 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
759 	if (!queue) {
760 		drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
761 		r = -ENOMEM;
762 		goto unlock;
763 	}
764 
765 	INIT_LIST_HEAD(&queue->userq_va_list);
766 	queue->doorbell_handle = args->in.doorbell_handle;
767 	queue->queue_type = args->in.ip_type;
768 	queue->vm = &fpriv->vm;
769 	queue->priority = priority;
770 
771 	db_info.queue_type = queue->queue_type;
772 	db_info.doorbell_handle = queue->doorbell_handle;
773 	db_info.db_obj = &queue->db_obj;
774 	db_info.doorbell_offset = args->in.doorbell_offset;
775 
776 	queue->userq_mgr = uq_mgr;
777 	/* Validate the userq virtual address.*/
778 	if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) ||
779 	    amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
780 	    amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
781 		r = -EINVAL;
782 		goto free_queue;
783 	}
784 
785 	/* Convert relative doorbell offset into absolute doorbell index */
786 	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
787 	if (index == (uint64_t)-EINVAL) {
788 		drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
789 		r = -EINVAL;
790 		goto free_queue;
791 	}
792 
793 	queue->doorbell_index = index;
794 	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
795 	r = amdgpu_userq_fence_driver_alloc(adev, queue);
796 	if (r) {
797 		drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
798 		goto free_queue;
799 	}
800 
801 	r = uq_funcs->mqd_create(queue, &args->in);
802 	if (r) {
803 		drm_file_err(uq_mgr->file, "Failed to create Queue\n");
804 		goto clean_fence_driver;
805 	}
806 
807 	/* don't map the queue if scheduling is halted */
808 	if (adev->userq_halt_for_enforce_isolation &&
809 	    ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
810 	     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
811 		skip_map_queue = true;
812 	else
813 		skip_map_queue = false;
814 	if (!skip_map_queue) {
815 		r = amdgpu_userq_map_helper(queue);
816 		if (r) {
817 			drm_file_err(uq_mgr->file, "Failed to map Queue\n");
818 			down_read(&adev->reset_domain->sem);
819 			goto clean_mqd;
820 		}
821 	}
822 
823 	/* drop this refcount during queue destroy */
824 	kref_init(&queue->refcount);
825 
826 	/* Wait for mode-1 reset to complete */
827 	down_read(&adev->reset_domain->sem);
828 
829 	r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
830 		     XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
831 	if (r) {
832 		if (!skip_map_queue)
833 			amdgpu_userq_unmap_helper(queue);
834 
835 		r = -ENOMEM;
836 		goto clean_mqd;
837 	}
838 
839 	r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
840 	if (r) {
841 		xa_erase(&uq_mgr->userq_xa, qid);
842 		if (!skip_map_queue)
843 			amdgpu_userq_unmap_helper(queue);
844 
845 		goto clean_mqd;
846 	}
847 	up_read(&adev->reset_domain->sem);
848 
849 	amdgpu_debugfs_userq_init(filp, queue, qid);
850 	amdgpu_userq_init_hang_detect_work(queue);
851 
852 	args->out.queue_id = qid;
853 	atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
854 	mutex_unlock(&uq_mgr->userq_mutex);
855 	return 0;
856 
857 clean_mqd:
858 	uq_funcs->mqd_destroy(queue);
859 	up_read(&adev->reset_domain->sem);
860 clean_fence_driver:
861 	amdgpu_userq_fence_driver_free(queue);
862 free_queue:
863 	kfree(queue);
864 unlock:
865 	mutex_unlock(&uq_mgr->userq_mutex);
866 
867 	return r;
868 }
869 
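/*
 * Sanity-check the ioctl arguments before any state is touched.  For
 * AMDGPU_USERQ_OP_CREATE the UAPI contract enforced below is, in short:
 * a supported ip_type (GFX/Compute/SDMA), only the priority/secure flags,
 * a non-zero queue_va with a power-of-two queue_size of at least one GPU
 * page, and non-zero rptr_va/wptr_va.  For AMDGPU_USERQ_OP_FREE every field
 * other than queue_id must be zero.
 */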
870 static int amdgpu_userq_input_args_validate(struct drm_device *dev,
871 					union drm_amdgpu_userq *args,
872 					struct drm_file *filp)
873 {
874 	struct amdgpu_device *adev = drm_to_adev(dev);
875 
876 	switch (args->in.op) {
877 	case AMDGPU_USERQ_OP_CREATE:
878 		if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
879 				       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
880 			return -EINVAL;
881 		/* Usermode queues are currently supported only for GFX, Compute and SDMA IPs */
882 		if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
883 		    args->in.ip_type != AMDGPU_HW_IP_DMA &&
884 		    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
885 			drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
886 				     args->in.ip_type);
887 			return -EINVAL;
888 		}
889 
890 		if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
891 		    (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
892 		    (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
893 		    !amdgpu_is_tmz(adev)) {
894 			drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
895 			return -EINVAL;
896 		}
897 
898 		if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
899 		    args->in.queue_va == 0 ||
900 		    args->in.queue_size == 0) {
901 			drm_file_err(filp, "invalid userq queue va or size\n");
902 			return -EINVAL;
903 		}
904 
905 		if (!is_power_of_2(args->in.queue_size)) {
906 			drm_file_err(filp, "Queue size must be a power of 2\n");
907 			return -EINVAL;
908 		}
909 
910 		if (args->in.queue_size < AMDGPU_GPU_PAGE_SIZE) {
911 			drm_file_err(filp, "Queue size smaller than AMDGPU_GPU_PAGE_SIZE\n");
912 			return -EINVAL;
913 		}
914 
915 		if (!args->in.wptr_va || !args->in.rptr_va) {
916 			drm_file_err(filp, "invalid userq queue rptr or wptr\n");
917 			return -EINVAL;
918 		}
919 		break;
920 	case AMDGPU_USERQ_OP_FREE:
921 		if (args->in.ip_type ||
922 		    args->in.doorbell_handle ||
923 		    args->in.doorbell_offset ||
924 		    args->in.flags ||
925 		    args->in.queue_va ||
926 		    args->in.queue_size ||
927 		    args->in.rptr_va ||
928 		    args->in.wptr_va ||
929 		    args->in.mqd ||
930 		    args->in.mqd_size)
931 			return -EINVAL;
932 		break;
933 	default:
934 		return -EINVAL;
935 	}
936 
937 	return 0;
938 }
939 
940 bool amdgpu_userq_enabled(struct drm_device *dev)
941 {
942 	struct amdgpu_device *adev = drm_to_adev(dev);
943 	int i;
944 
945 	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
946 		if (adev->userq_funcs[i])
947 			return true;
948 	}
949 
950 	return false;
951 }
952 
953 int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
954 		       struct drm_file *filp)
955 {
956 	union drm_amdgpu_userq *args = data;
957 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
958 	struct amdgpu_usermode_queue *queue;
959 	int r = 0;
960 
961 	if (!amdgpu_userq_enabled(dev))
962 		return -ENOTSUPP;
963 
964 	if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
965 		return -EINVAL;
966 
967 	switch (args->in.op) {
968 	case AMDGPU_USERQ_OP_CREATE:
969 		r = amdgpu_userq_create(filp, args);
970 		if (r)
971 			drm_file_err(filp, "Failed to create usermode queue\n");
972 		break;
973 
974 	case AMDGPU_USERQ_OP_FREE: {
975 		xa_lock(&fpriv->userq_mgr.userq_xa);
976 		queue = __xa_erase(&fpriv->userq_mgr.userq_xa, args->in.queue_id);
977 		xa_unlock(&fpriv->userq_mgr.userq_xa);
978 		if (!queue)
979 			return -ENOENT;
980 
981 		amdgpu_userq_put(queue);
982 		break;
983 	}
984 
985 	default:
986 		drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
987 		return -EINVAL;
988 	}
989 
990 	return r;
991 }
992 
993 static int
994 amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
995 {
996 	struct amdgpu_usermode_queue *queue;
997 	unsigned long queue_id;
998 	int ret = 0, r;
999 
1000 	/* Resume all the queues for this process */
1001 	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1002 		queue = amdgpu_userq_get(uq_mgr, queue_id);
1003 		if (!queue)
1004 			continue;
1005 
1006 		if (!amdgpu_userq_buffer_vas_mapped(queue)) {
1007 			drm_file_err(uq_mgr->file,
1008 				     "trying to restore a queue without a VA mapping\n");
1009 			queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
1010 			amdgpu_userq_put(queue);
1011 			continue;
1012 		}
1013 
1014 		r = amdgpu_userq_restore_helper(queue);
1015 		if (r)
1016 			ret = r;
1017 
1018 		amdgpu_userq_put(queue);
1019 	}
1020 
1021 	if (ret)
1022 		drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
1023 	return ret;
1024 }
1025 
1026 static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
1027 {
1028 	struct ttm_operation_ctx ctx = { false, false };
1029 
1030 	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1031 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1032 }
1033 
1034 /* Handle all BOs on the invalidated list, validate them and update the PTs */
1035 static int
1036 amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
1037 			 struct amdgpu_vm *vm)
1038 {
1039 	struct ttm_operation_ctx ctx = { false, false };
1040 	struct amdgpu_bo_va *bo_va;
1041 	struct amdgpu_bo *bo;
1042 	int ret;
1043 
1044 	spin_lock(&vm->status_lock);
1045 	while (!list_empty(&vm->invalidated)) {
1046 		bo_va = list_first_entry(&vm->invalidated,
1047 					 struct amdgpu_bo_va,
1048 					 base.vm_status);
1049 		spin_unlock(&vm->status_lock);
1050 
1051 		bo = bo_va->base.bo;
1052 		ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
1053 		if (unlikely(ret))
1054 			return ret;
1055 
1056 		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1057 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1058 		if (ret)
1059 			return ret;
1060 
1061 		/* This moves the bo_va to the done list */
1062 		ret = amdgpu_vm_bo_update(adev, bo_va, false);
1063 		if (ret)
1064 			return ret;
1065 
1066 		spin_lock(&vm->status_lock);
1067 	}
1068 	spin_unlock(&vm->status_lock);
1069 
1070 	return 0;
1071 }
1072 
1073 /* Make sure the whole VM is ready to be used */
1074 static int
1075 amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
1076 {
1077 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1078 	bool invalidated = false, new_addition = false;
1079 	struct ttm_operation_ctx ctx = { true, false };
1080 	struct amdgpu_device *adev = uq_mgr->adev;
1081 	struct amdgpu_hmm_range *range;
1082 	struct amdgpu_vm *vm = &fpriv->vm;
1083 	unsigned long key, tmp_key;
1084 	struct amdgpu_bo_va *bo_va;
1085 	struct amdgpu_bo *bo;
1086 	struct drm_exec exec;
1087 	struct xarray xa;
1088 	int ret;
1089 
1090 	xa_init(&xa);
1091 
1092 retry_lock:
1093 	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
1094 	drm_exec_until_all_locked(&exec) {
1095 		ret = amdgpu_vm_lock_pd(vm, &exec, 1);
1096 		drm_exec_retry_on_contention(&exec);
1097 		if (unlikely(ret))
1098 			goto unlock_all;
1099 
1100 		ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
1101 		drm_exec_retry_on_contention(&exec);
1102 		if (unlikely(ret))
1103 			goto unlock_all;
1104 
1105 		/* This validates PDs, PTs and per VM BOs */
1106 		ret = amdgpu_vm_validate(adev, vm, NULL,
1107 					 amdgpu_userq_validate_vm,
1108 					 NULL);
1109 		if (unlikely(ret))
1110 			goto unlock_all;
1111 
1112 		/* This locks and validates the remaining evicted BOs */
1113 		ret = amdgpu_userq_bo_validate(adev, &exec, vm);
1114 		drm_exec_retry_on_contention(&exec);
1115 		if (unlikely(ret))
1116 			goto unlock_all;
1117 	}
1118 
1119 	if (invalidated) {
1120 		xa_for_each(&xa, tmp_key, range) {
1121 			bo = range->bo;
1122 			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1123 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1124 			if (ret)
1125 				goto unlock_all;
1126 
1127 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
1128 
1129 			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
1130 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1131 			if (ret)
1132 				goto unlock_all;
1133 		}
1134 		invalidated = false;
1135 	}
1136 
1137 	ret = amdgpu_vm_handle_moved(adev, vm, NULL);
1138 	if (ret)
1139 		goto unlock_all;
1140 
1141 	key = 0;
1142 	/* Validate User Ptr BOs */
1143 	list_for_each_entry(bo_va, &vm->done, base.vm_status) {
1144 		bo = bo_va->base.bo;
1145 		if (!bo)
1146 			continue;
1147 
1148 		if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
1149 			continue;
1150 
1151 		range = xa_load(&xa, key);
1152 		if (range && range->bo != bo) {
1153 			xa_erase(&xa, key);
1154 			amdgpu_hmm_range_free(range);
1155 			range = NULL;
1156 		}
1157 
1158 		if (!range) {
1159 			range = amdgpu_hmm_range_alloc(bo);
1160 			if (!range) {
1161 				ret = -ENOMEM;
1162 				goto unlock_all;
1163 			}
1164 
1165 			xa_store(&xa, key, range, GFP_KERNEL);
1166 			new_addition = true;
1167 		}
1168 		key++;
1169 	}
1170 
1171 	if (new_addition) {
1172 		drm_exec_fini(&exec);
1173 		xa_for_each(&xa, tmp_key, range) {
1174 			if (!range)
1175 				continue;
1176 			bo = range->bo;
1177 			ret = amdgpu_ttm_tt_get_user_pages(bo, range);
1178 			if (ret)
1179 				goto unlock_all;
1180 		}
1181 
1182 		invalidated = true;
1183 		new_addition = false;
1184 		goto retry_lock;
1185 	}
1186 
1187 	ret = amdgpu_vm_update_pdes(adev, vm, false);
1188 	if (ret)
1189 		goto unlock_all;
1190 
1191 	/*
1192 	 * We need to wait for all VM updates to finish before restarting the
1193 	 * queues. Using the done list like that is now ok since everything is
1194 	 * locked in place.
1195 	 */
1196 	list_for_each_entry(bo_va, &vm->done, base.vm_status)
1197 		dma_fence_wait(bo_va->last_pt_update, false);
1198 	dma_fence_wait(vm->last_update, false);
1199 
1200 	ret = amdgpu_evf_mgr_rearm(&fpriv->evf_mgr, &exec);
1201 	if (ret)
1202 		drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
1203 
1204 unlock_all:
1205 	drm_exec_fini(&exec);
1206 	xa_for_each(&xa, tmp_key, range) {
1207 		if (!range)
1208 			continue;
1209 		bo = range->bo;
1210 		amdgpu_hmm_range_free(range);
1211 	}
1212 	xa_destroy(&xa);
1213 	return ret;
1214 }
1215 
1216 static void amdgpu_userq_restore_worker(struct work_struct *work)
1217 {
1218 	struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
1219 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1220 	struct dma_fence *ev_fence;
1221 	int ret;
1222 
1223 	mutex_lock(&uq_mgr->userq_mutex);
1224 	ev_fence = amdgpu_evf_mgr_get_fence(&fpriv->evf_mgr);
1225 	if (!dma_fence_is_signaled(ev_fence))
1226 		goto unlock;
1227 
1228 	ret = amdgpu_userq_vm_validate(uq_mgr);
1229 	if (ret) {
1230 		drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
1231 		goto unlock;
1232 	}
1233 
1234 	ret = amdgpu_userq_restore_all(uq_mgr);
1235 	if (ret) {
1236 		drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
1237 		goto unlock;
1238 	}
1239 
1240 unlock:
1241 	mutex_unlock(&uq_mgr->userq_mutex);
1242 	dma_fence_put(ev_fence);
1243 }
1244 
1245 static int
1246 amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
1247 {
1248 	struct amdgpu_usermode_queue *queue;
1249 	unsigned long queue_id;
1250 	int ret = 0, r;
1251 
1252 	amdgpu_userq_detect_and_reset_queues(uq_mgr);
1253 	/* Try to unmap all the queues in this process ctx */
1254 	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1255 		queue = amdgpu_userq_get(uq_mgr, queue_id);
1256 		if (!queue)
1257 			continue;
1258 		r = amdgpu_userq_preempt_helper(queue);
1259 		if (r)
1260 			ret = r;
1261 		amdgpu_userq_put(queue);
1262 	}
1263 
1264 	if (ret)
1265 		drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
1266 	return ret;
1267 }
1268 
1269 void amdgpu_userq_reset_work(struct work_struct *work)
1270 {
1271 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
1272 						  userq_reset_work);
1273 	struct amdgpu_reset_context reset_context;
1274 
1275 	memset(&reset_context, 0, sizeof(reset_context));
1276 
1277 	reset_context.method = AMD_RESET_METHOD_NONE;
1278 	reset_context.reset_req_dev = adev;
1279 	reset_context.src = AMDGPU_RESET_SRC_USERQ;
1280 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
1281 	/*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/
1282 
1283 	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
1284 }
1285 
1286 static int
1287 amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
1288 {
1289 	struct amdgpu_usermode_queue *queue;
1290 	unsigned long queue_id;
1291 	int ret;
1292 
1293 	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1294 		queue = amdgpu_userq_get(uq_mgr, queue_id);
1295 		if (!queue)
1296 			continue;
1297 
1298 		struct dma_fence *f = queue->last_fence;
1299 
1300 		if (!f || dma_fence_is_signaled(f)) {
1301 			amdgpu_userq_put(queue);
1302 			continue;
1303 		}
1304 		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
1305 		if (ret <= 0) {
1306 			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
1307 				     f->context, f->seqno);
1308 			amdgpu_userq_put(queue);
1309 			return -ETIMEDOUT;
1310 		}
1311 		amdgpu_userq_put(queue);
1312 	}
1313 
1314 	return 0;
1315 }
1316 
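/*
 * Eviction entry point, typically driven by the eviction-fence machinery:
 * wait for in-flight userq fences, preempt every queue owned by this manager
 * and, if requested, schedule resume_work.  The resume path
 * (amdgpu_userq_restore_worker) then revalidates the VM, restores the queues
 * and rearms the eviction fence.
 */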
1317 void
1318 amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, bool schedule_resume)
1319 {
1320 	struct amdgpu_device *adev = uq_mgr->adev;
1321 	int ret;
1322 
1323 	/* Wait for any pending userqueue fence work to finish */
1324 	ret = amdgpu_userq_wait_for_signal(uq_mgr);
1325 	if (ret)
1326 		dev_err(adev->dev, "Not evicting userqueue, timeout waiting for work\n");
1327 
1328 	ret = amdgpu_userq_evict_all(uq_mgr);
1329 	if (ret)
1330 		dev_err(adev->dev, "Failed to evict userqueue\n");
1331 
1332 	if (schedule_resume)
1333 		schedule_delayed_work(&uq_mgr->resume_work, 0);
1334 }
1335 
1336 int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
1337 			  struct amdgpu_device *adev)
1338 {
1339 	mutex_init(&userq_mgr->userq_mutex);
1340 	xa_init_flags(&userq_mgr->userq_xa, XA_FLAGS_ALLOC);
1341 	userq_mgr->adev = adev;
1342 	userq_mgr->file = file_priv;
1343 
1344 	INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
1345 	return 0;
1346 }
1347 
1348 void amdgpu_userq_mgr_cancel_resume(struct amdgpu_userq_mgr *userq_mgr)
1349 {
1350 	cancel_delayed_work_sync(&userq_mgr->resume_work);
1351 }
1352 
1353 void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
1354 {
1355 	struct amdgpu_usermode_queue *queue;
1356 	unsigned long queue_id = 0;
1357 
1358 	for (;;) {
1359 		xa_lock(&userq_mgr->userq_xa);
1360 		queue = xa_find(&userq_mgr->userq_xa, &queue_id, ULONG_MAX,
1361 				XA_PRESENT);
1362 		if (queue)
1363 			__xa_erase(&userq_mgr->userq_xa, queue_id);
1364 		xa_unlock(&userq_mgr->userq_xa);
1365 
1366 		if (!queue)
1367 			break;
1368 
1369 		amdgpu_userq_put(queue);
1370 	}
1371 
1372 	xa_destroy(&userq_mgr->userq_xa);
1373 	mutex_destroy(&userq_mgr->userq_mutex);
1374 }
1375 
1376 int amdgpu_userq_suspend(struct amdgpu_device *adev)
1377 {
1378 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1379 	struct amdgpu_usermode_queue *queue;
1380 	struct amdgpu_userq_mgr *uqm;
1381 	unsigned long queue_id;
1382 	int r;
1383 
1384 	if (!ip_mask)
1385 		return 0;
1386 
1387 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1388 		uqm = queue->userq_mgr;
1389 		cancel_delayed_work_sync(&uqm->resume_work);
1390 		guard(mutex)(&uqm->userq_mutex);
1391 		amdgpu_userq_detect_and_reset_queues(uqm);
1392 		if (adev->in_s0ix)
1393 			r = amdgpu_userq_preempt_helper(queue);
1394 		else
1395 			r = amdgpu_userq_unmap_helper(queue);
1396 		if (r)
1397 			return r;
1398 	}
1399 	return 0;
1400 }
1401 
1402 int amdgpu_userq_resume(struct amdgpu_device *adev)
1403 {
1404 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1405 	struct amdgpu_usermode_queue *queue;
1406 	struct amdgpu_userq_mgr *uqm;
1407 	unsigned long queue_id;
1408 	int r;
1409 
1410 	if (!ip_mask)
1411 		return 0;
1412 
1413 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1414 		uqm = queue->userq_mgr;
1415 		guard(mutex)(&uqm->userq_mutex);
1416 		if (adev->in_s0ix)
1417 			r = amdgpu_userq_restore_helper(queue);
1418 		else
1419 			r = amdgpu_userq_map_helper(queue);
1420 		if (r)
1421 			return r;
1422 	}
1423 
1424 	return 0;
1425 }
1426 
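/*
 * Enforce-isolation pair: the stop variant below sets
 * adev->userq_halt_for_enforce_isolation and preempts the GFX/Compute queues
 * on the given partition, while the start variant clears the flag and
 * restores them.  While the flag is set, amdgpu_userq_create() skips mapping
 * newly created GFX/Compute queues.
 */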
1427 int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
1428 						  u32 idx)
1429 {
1430 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1431 	struct amdgpu_usermode_queue *queue;
1432 	struct amdgpu_userq_mgr *uqm;
1433 	unsigned long queue_id;
1434 	int ret = 0, r;
1435 
1436 	/* only need to stop gfx/compute */
1437 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
1438 		return 0;
1439 
1440 	if (adev->userq_halt_for_enforce_isolation)
1441 		dev_warn(adev->dev, "userq scheduling already stopped!\n");
1442 	adev->userq_halt_for_enforce_isolation = true;
1443 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1444 		uqm = queue->userq_mgr;
1445 		cancel_delayed_work_sync(&uqm->resume_work);
1446 		mutex_lock(&uqm->userq_mutex);
1447 		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1448 		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1449 		    (queue->xcp_id == idx)) {
1450 			amdgpu_userq_detect_and_reset_queues(uqm);
1451 			r = amdgpu_userq_preempt_helper(queue);
1452 			if (r)
1453 				ret = r;
1454 		}
1455 		mutex_unlock(&uqm->userq_mutex);
1456 	}
1457 
1458 	return ret;
1459 }
1460 
1461 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
1462 						   u32 idx)
1463 {
1464 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1465 	struct amdgpu_usermode_queue *queue;
1466 	struct amdgpu_userq_mgr *uqm;
1467 	unsigned long queue_id;
1468 	int ret = 0, r;
1469 
1470 	/* only need to start gfx/compute */
1471 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
1472 		return 0;
1473 
1474 	if (!adev->userq_halt_for_enforce_isolation)
1475 		dev_warn(adev->dev, "userq scheduling already started!\n");
1476 	adev->userq_halt_for_enforce_isolation = false;
1477 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1478 		uqm = queue->userq_mgr;
1479 		mutex_lock(&uqm->userq_mutex);
1480 		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1481 		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1482 		    (queue->xcp_id == idx)) {
1483 			r = amdgpu_userq_restore_helper(queue);
1484 			if (r)
1485 				ret = r;
1486 		}
1487 		mutex_unlock(&uqm->userq_mutex);
1488 	}
1489 
1490 	return ret;
1491 }
1492 
1493 int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
1494 				       struct amdgpu_bo_va_mapping *mapping,
1495 				       uint64_t saddr)
1496 {
1497 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1498 	struct amdgpu_bo_va *bo_va = mapping->bo_va;
1499 	struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
1500 	int ret = 0;
1501 
1502 	if (!ip_mask)
1503 		return 0;
1504 
1505 	dev_warn_once(adev->dev, "unmapping a VA still in use by a user queue: 0x%llx\n", saddr);
1506 	/*
1507 	 * The userq VA mapping's reservation should include the eviction fence.
1508 	 * If the eviction fence cannot be signaled successfully during unmapping,
1509 	 * the driver warns to flag this improper unmap of the userq VA.
1510 	 * Note: the eviction fence may be attached to different BOs, and this
1511 	 * unmap only handles one kind of userq VA, so at this point assume
1512 	 * the eviction fence is always unsignaled.
1513 	 */
1514 	if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
1515 		ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
1516 					    MAX_SCHEDULE_TIMEOUT);
1517 		if (ret <= 0)
1518 			return -EBUSY;
1519 	}
1520 
1521 	return 0;
1522 }
1523 
1524 void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
1525 {
1526 	const struct amdgpu_userq_funcs *userq_funcs;
1527 	struct amdgpu_usermode_queue *queue;
1528 	struct amdgpu_userq_mgr *uqm;
1529 	unsigned long queue_id;
1530 
1531 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1532 		uqm = queue->userq_mgr;
1533 		cancel_delayed_work_sync(&uqm->resume_work);
1534 		if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
1535 			amdgpu_userq_wait_for_last_fence(queue);
1536 			userq_funcs = adev->userq_funcs[queue->queue_type];
1537 			userq_funcs->unmap(queue);
1538 			/* just mark all queues as hung at this point.
1539 			 * if unmap succeeds, we could map again
1540 			 * in amdgpu_userq_post_reset() if vram is not lost
1541 			 */
1542 			queue->state = AMDGPU_USERQ_STATE_HUNG;
1543 			amdgpu_userq_fence_driver_force_completion(queue);
1544 		}
1545 	}
1546 }
1547 
1548 int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost)
1549 {
1550 	/* Queues unmapped in amdgpu_userq_pre_reset() were marked as
1551 	 * AMDGPU_USERQ_STATE_HUNG; if VRAM is not lost we should be able
1552 	 * to map them again and continue.
1553 	 */
1554 	struct amdgpu_usermode_queue *queue;
1555 	const struct amdgpu_userq_funcs *userq_funcs;
1556 	unsigned long queue_id;
1557 	int r = 0;
1558 
1559 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1560 		if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
1561 			userq_funcs = adev->userq_funcs[queue->queue_type];
1562 			/* Re-map queue */
1563 			r = userq_funcs->map(queue);
1564 			if (r) {
1565 				dev_err(adev->dev, "Failed to remap queue %lu\n", queue_id);
1566 				continue;
1567 			}
1568 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
1569 		}
1570 	}
1571 
1572 	return r;
1573 }
1574