xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c (revision 6916d5703ddf9a38f1f6c2cc793381a24ee914c6)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <drm/drm_auth.h>
26 #include <drm/drm_exec.h>
27 #include <linux/pm_runtime.h>
28 #include <drm/drm_drv.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_reset.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_userq.h"
34 #include "amdgpu_hmm.h"
35 #include "amdgpu_userq_fence.h"
36 
37 u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
38 {
39 	int i;
40 	u32 userq_ip_mask = 0;
41 
42 	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
43 		if (adev->userq_funcs[i])
44 			userq_ip_mask |= (1 << i);
45 	}
46 
47 	return userq_ip_mask;
48 }
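/*
 * The returned mask carries one bit per HW IP type. An illustrative check,
 * mirroring the enforce-isolation test further down in this file:
 *
 *	if (amdgpu_userq_get_supported_ip_mask(adev) & (1 << AMDGPU_HW_IP_GFX))
 *		gfx_userq_supported = true;
 *
 * where gfx_userq_supported is a hypothetical local variable.
 */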
49 
50 static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev,
51 				enum amdgpu_ring_type ring_type, int reset_type)
52 {
53 
54 	if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX)
55 		return false;
56 
57 	switch (ring_type) {
58 	case AMDGPU_RING_TYPE_GFX:
59 		if (adev->gfx.gfx_supported_reset & reset_type)
60 			return true;
61 		break;
62 	case AMDGPU_RING_TYPE_COMPUTE:
63 		if (adev->gfx.compute_supported_reset & reset_type)
64 			return true;
65 		break;
66 	case AMDGPU_RING_TYPE_SDMA:
67 		if (adev->sdma.supported_reset & reset_type)
68 			return true;
69 		break;
70 	case AMDGPU_RING_TYPE_VCN_DEC:
71 	case AMDGPU_RING_TYPE_VCN_ENC:
72 		if (adev->vcn.supported_reset & reset_type)
73 			return true;
74 		break;
75 	case AMDGPU_RING_TYPE_VCN_JPEG:
76 		if (adev->jpeg.supported_reset & reset_type)
77 			return true;
78 		break;
79 	default:
80 		break;
81 	}
82 	return false;
83 }
84 
85 static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev)
86 {
87 	if (amdgpu_device_should_recover_gpu(adev)) {
88 		amdgpu_reset_domain_schedule(adev->reset_domain,
89 					     &adev->userq_reset_work);
90 		/* Wait for the reset job to complete */
91 		flush_work(&adev->userq_reset_work);
92 	}
93 }
94 
95 static int
96 amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr)
97 {
98 	struct amdgpu_device *adev = uq_mgr->adev;
99 	const int queue_types[] = {
100 		AMDGPU_RING_TYPE_COMPUTE,
101 		AMDGPU_RING_TYPE_GFX,
102 		AMDGPU_RING_TYPE_SDMA
103 	};
104 	const int num_queue_types = ARRAY_SIZE(queue_types);
105 	bool gpu_reset = false;
106 	int r = 0;
107 	int i;
108 
109 	if (unlikely(adev->debug_disable_gpu_ring_reset)) {
110 		dev_err(adev->dev, "userq reset disabled by debug mask\n");
111 		return 0;
112 	}
113 
114 	/*
115 	 * If GPU recovery feature is disabled system-wide,
116 	 * skip all reset detection logic
117 	 */
118 	if (!amdgpu_gpu_recovery)
119 		return 0;
120 
121 	/*
122 	 * Iterate through all queue types to detect and reset problematic queues
123 	 * Process each queue type in the defined order
124 	 */
125 	for (i = 0; i < num_queue_types; i++) {
126 		int ring_type = queue_types[i];
127 		const struct amdgpu_userq_funcs *funcs =
128 			adev->userq_funcs[ring_type];
129 
130 		if (!amdgpu_userq_is_reset_type_supported(adev, ring_type,
131 							  AMDGPU_RESET_TYPE_PER_QUEUE))
132 			continue;
133 
134 		if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 &&
135 		    funcs && funcs->detect_and_reset) {
136 			r = funcs->detect_and_reset(adev, ring_type);
137 			if (r) {
138 				gpu_reset = true;
139 				break;
140 			}
141 		}
142 	}
143 
144 	if (gpu_reset)
145 		amdgpu_userq_gpu_reset(adev);
146 
147 	return r;
148 }
149 
150 static void amdgpu_userq_hang_detect_work(struct work_struct *work)
151 {
152 	struct amdgpu_usermode_queue *queue =
153 		container_of(work, struct amdgpu_usermode_queue,
154 			     hang_detect_work.work);
155 
156 	amdgpu_userq_detect_and_reset_queues(queue->userq_mgr);
157 }
158 
159 /*
160  * Start hang detection for a user queue fence. A delayed work will be scheduled
161  * to reset the queues when the fence doesn't signal in time.
162  */
163 void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue)
164 {
165 	struct amdgpu_device *adev;
166 	unsigned long timeout_ms;
167 
168 	adev = queue->userq_mgr->adev;
169 	/* Determine timeout based on queue type */
170 	switch (queue->queue_type) {
171 	case AMDGPU_RING_TYPE_GFX:
172 		timeout_ms = adev->gfx_timeout;
173 		break;
174 	case AMDGPU_RING_TYPE_COMPUTE:
175 		timeout_ms = adev->compute_timeout;
176 		break;
177 	case AMDGPU_RING_TYPE_SDMA:
178 		timeout_ms = adev->sdma_timeout;
179 		break;
180 	default:
181 		timeout_ms = adev->gfx_timeout;
182 		break;
183 	}
184 
185 	schedule_delayed_work(&queue->hang_detect_work,
186 		     msecs_to_jiffies(timeout_ms));
187 }
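/*
 * Illustrative arming pattern; the actual call sites are the fence emit
 * path (outside this file, e.g. amdgpu_userq_fence.c) and the fence IRQ
 * handler below:
 *
 *	amdgpu_userq_start_hang_detect_work(queue);
 *
 * amdgpu_userq_process_fence_irq() cancels the delayed work once fences
 * have been processed and re-arms it while fences are still pending.
 */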
188 
189 void amdgpu_userq_process_fence_irq(struct amdgpu_device *adev, u32 doorbell)
190 {
191 	struct xarray *xa = &adev->userq_doorbell_xa;
192 	struct amdgpu_usermode_queue *queue;
193 	unsigned long flags;
194 	int r;
195 
196 	xa_lock_irqsave(xa, flags);
197 	queue = xa_load(xa, doorbell);
198 	if (queue) {
199 		r = amdgpu_userq_fence_driver_process(queue->fence_drv);
200 		/*
201 		 * We are in interrupt context here, this *can't* wait for
202 		 * reset work to finish.
203 		 */
204 		if (r >= 0)
205 			cancel_delayed_work(&queue->hang_detect_work);
206 
207 		/* Restart the timer when there are still fences pending */
208 		if (r == 1)
209 			amdgpu_userq_start_hang_detect_work(queue);
210 	}
211 	xa_unlock_irqrestore(xa, flags);
212 }
213 
214 static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
215 					   struct amdgpu_bo_va_mapping *va_map, u64 addr)
216 {
217 	struct amdgpu_userq_va_cursor *va_cursor;
219 
220 	va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
221 	if (!va_cursor)
222 		return -ENOMEM;
223 
224 	INIT_LIST_HEAD(&va_cursor->list);
225 	va_cursor->gpu_addr = addr;
226 	atomic_set(&va_map->bo_va->userq_va_mapped, 1);
227 	list_add(&va_cursor->list, &queue->userq_va_list);
228 
229 	return 0;
230 }
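/*
 * One cursor node lands on queue->userq_va_list per validated VA; a queue
 * created by amdgpu_userq_create() below ends up with three of them (ring
 * buffer, rptr and wptr), all torn down again by
 * amdgpu_userq_buffer_vas_list_cleanup().
 */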
231 
232 int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
233 				   struct amdgpu_usermode_queue *queue,
234 				   u64 addr, u64 expected_size)
235 {
236 	struct amdgpu_bo_va_mapping *va_map;
237 	struct amdgpu_vm *vm = queue->vm;
238 	u64 user_addr;
239 	u64 size;
240 	int r = 0;
241 
242 	/* Caller must hold vm->root.bo reservation */
243 	dma_resv_assert_held(queue->vm->root.bo->tbo.base.resv);
244 
245 	user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
246 	size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
247 
248 	va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
249 	if (!va_map) {
250 		r = -EINVAL;
251 		goto out_err;
252 	}
253 	/* Check that the userq buffer is fully contained in the VM mapping range */
254 	if (user_addr >= va_map->start &&
255 	    va_map->last - user_addr + 1 >= size) {
256 		amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
257 		return 0;
258 	}
259 
260 	r = -EINVAL;
261 out_err:
262 	return r;
263 }
264 
265 static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
266 {
267 	struct amdgpu_bo_va_mapping *mapping;
268 	bool r;
269 
270 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
271 
272 	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
273 	if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
274 		r = true;
275 	else
276 		r = false;
277 
278 	return r;
279 }
280 
281 static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
282 {
283 	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
284 	int r = 0;
285 
286 	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
287 		r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
288 		dev_dbg(queue->userq_mgr->adev->dev,
289 			"validate the userq mapping:%p va:%llx r:%d\n",
290 			queue, va_cursor->gpu_addr, r);
291 	}
292 
293 	if (r != 0)
294 		return true;
295 
296 	return false;
297 }
298 
299 static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
300 					    struct amdgpu_userq_va_cursor *va_cursor)
301 {
302 	atomic_set(&mapping->bo_va->userq_va_mapped, 0);
303 	list_del(&va_cursor->list);
304 	kfree(va_cursor);
305 }
306 
307 static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
308 						struct amdgpu_usermode_queue *queue)
309 {
310 	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
311 	struct amdgpu_bo_va_mapping *mapping;
312 
313 	/* Caller must hold vm->root.bo reservation */
314 	dma_resv_assert_held(queue->vm->root.bo->tbo.base.resv);
315 
316 	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
317 		mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
318 		if (!mapping) {
319 			return -EINVAL;
320 		}
321 		dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
322 			queue, va_cursor->gpu_addr);
323 		amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
324 	}
325 
326 	return 0;
327 }
328 
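/*
 * Queue life cycle handled by the four helpers below:
 *
 *	MAPPED --preempt--> PREEMPTED --restore--> MAPPED
 *	MAPPED/PREEMPTED --unmap--> UNMAPPED --map--> MAPPED
 *
 * Any failing transition parks the queue in AMDGPU_USERQ_STATE_HUNG.
 */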
329 static int amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue *queue)
330 {
331 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
332 	struct amdgpu_device *adev = uq_mgr->adev;
333 	const struct amdgpu_userq_funcs *userq_funcs =
334 		adev->userq_funcs[queue->queue_type];
335 	int r;
336 
337 	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
338 		r = userq_funcs->preempt(queue);
339 		if (r) {
340 			queue->state = AMDGPU_USERQ_STATE_HUNG;
341 			return r;
342 		} else {
343 			queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
344 		}
345 	}
346 	return 0;
347 }
348 
349 static int amdgpu_userq_restore_helper(struct amdgpu_usermode_queue *queue)
350 {
351 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
352 	struct amdgpu_device *adev = uq_mgr->adev;
353 	const struct amdgpu_userq_funcs *userq_funcs =
354 		adev->userq_funcs[queue->queue_type];
355 	int r = 0;
356 
357 	if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
358 		r = userq_funcs->restore(queue);
359 		if (r) {
360 			queue->state = AMDGPU_USERQ_STATE_HUNG;
361 		} else {
362 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
363 		}
364 	}
365 
366 	return r;
367 }
368 
369 static int amdgpu_userq_unmap_helper(struct amdgpu_usermode_queue *queue)
370 {
371 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
372 	struct amdgpu_device *adev = uq_mgr->adev;
373 	const struct amdgpu_userq_funcs *userq_funcs =
374 		adev->userq_funcs[queue->queue_type];
375 	int r;
376 
377 	if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
378 	    (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
379 
380 		r = userq_funcs->unmap(queue);
381 		if (r) {
382 			queue->state = AMDGPU_USERQ_STATE_HUNG;
383 			return r;
384 		} else {
385 			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
386 		}
387 	}
388 
389 	return 0;
390 }
391 
392 static int amdgpu_userq_map_helper(struct amdgpu_usermode_queue *queue)
393 {
394 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
395 	struct amdgpu_device *adev = uq_mgr->adev;
396 	const struct amdgpu_userq_funcs *userq_funcs =
397 		adev->userq_funcs[queue->queue_type];
398 	int r;
399 
400 	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
401 		r = userq_funcs->map(queue);
402 		if (r) {
403 			queue->state = AMDGPU_USERQ_STATE_HUNG;
404 			return r;
405 		} else {
406 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
407 		}
408 	}
409 
410 	return 0;
411 }
412 
413 static void amdgpu_userq_wait_for_last_fence(struct amdgpu_usermode_queue *queue)
414 {
415 	struct dma_fence *f = queue->last_fence;
416 
417 	if (!f)
418 		return;
419 
420 	dma_fence_wait(f, false);
421 }
422 
423 static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue)
424 {
425 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
426 	struct amdgpu_device *adev = uq_mgr->adev;
427 	const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
428 
429 	/* Wait for mode-1 reset to complete */
430 	down_read(&adev->reset_domain->sem);
431 
432 	uq_funcs->mqd_destroy(queue);
433 	/* Use interrupt-safe locking since IRQ handlers may access these XArrays */
434 	xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
435 	amdgpu_userq_fence_driver_free(queue);
436 	queue->fence_drv = NULL;
437 	queue->userq_mgr = NULL;
438 	list_del(&queue->userq_va_list);
439 
440 	up_read(&adev->reset_domain->sem);
441 }
442 
443 /**
444  * amdgpu_userq_ensure_ev_fence - ensure a valid, unsignaled eviction fence exists
445  * @uq_mgr: the usermode queue manager for this process
446  * @evf_mgr: the eviction fence manager to check and rearm
447  *
448  * Ensures that a valid and not yet signaled eviction fence is attached to the
449  * usermode queue before any queue operations proceed. If the current fence
450  * has already signaled, a new eviction fence is created and armed.
451  */
452 void
453 amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
454 			     struct amdgpu_eviction_fence_mgr *evf_mgr)
455 {
456 	struct dma_fence *ev_fence;
457 
458 retry:
459 	/* Flush any pending resume work to create ev_fence */
460 	flush_delayed_work(&uq_mgr->resume_work);
461 
462 	mutex_lock(&uq_mgr->userq_mutex);
463 	ev_fence = amdgpu_evf_mgr_get_fence(evf_mgr);
464 	if (dma_fence_is_signaled(ev_fence)) {
465 		dma_fence_put(ev_fence);
466 		mutex_unlock(&uq_mgr->userq_mutex);
467 		/*
468 		 * Looks like there was no pending resume work,
469 		 * add one now to create a valid eviction fence
470 		 */
471 		schedule_delayed_work(&uq_mgr->resume_work, 0);
472 		goto retry;
473 	}
474 	dma_fence_put(ev_fence);
475 }
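/*
 * Note that this function returns with uq_mgr->userq_mutex held; callers
 * pair it with an explicit unlock, as amdgpu_userq_create() does:
 *
 *	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
 *	...map the queue and publish it in the xarrays...
 *	mutex_unlock(&uq_mgr->userq_mutex);
 */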
476 
477 int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
478 			       struct amdgpu_userq_obj *userq_obj,
479 			       int size)
480 {
481 	struct amdgpu_device *adev = uq_mgr->adev;
482 	struct amdgpu_bo_param bp;
483 	int r;
484 
485 	memset(&bp, 0, sizeof(bp));
486 	bp.byte_align = PAGE_SIZE;
487 	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
488 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
489 		   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
490 	bp.type = ttm_bo_type_kernel;
491 	bp.size = size;
492 	bp.resv = NULL;
493 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
494 
495 	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
496 	if (r) {
497 		drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
498 		return r;
499 	}
500 
501 	r = amdgpu_bo_reserve(userq_obj->obj, true);
502 	if (r) {
503 		drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
504 		goto free_obj;
505 	}
506 
507 	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
508 	if (r) {
509 		drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
510 		goto unresv;
511 	}
512 
513 	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
514 	if (r) {
515 		drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
516 		goto unresv;
517 	}
518 
519 	userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
520 	amdgpu_bo_unreserve(userq_obj->obj);
521 	memset(userq_obj->cpu_ptr, 0, size);
522 	return 0;
523 
524 unresv:
525 	amdgpu_bo_unreserve(userq_obj->obj);
526 
527 free_obj:
528 	amdgpu_bo_unref(&userq_obj->obj);
529 	return r;
530 }
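/*
 * Sketch of typical usage from an IP backend's mqd_create() callback; the
 * mqd field and mqd_size names below are illustrative, not mandated here:
 *
 *	r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_size);
 *	if (r)
 *		return r;
 *	...queue->mqd.cpu_ptr and queue->mqd.gpu_addr are now valid and zeroed...
 */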
531 
532 void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
533 				 struct amdgpu_userq_obj *userq_obj)
534 {
535 	amdgpu_bo_kunmap(userq_obj->obj);
536 	amdgpu_bo_unref(&userq_obj->obj);
537 }
538 
539 uint64_t
540 amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
541 				struct amdgpu_db_info *db_info,
542 				struct drm_file *filp)
543 {
544 	uint64_t index;
545 	struct drm_gem_object *gobj;
546 	struct amdgpu_userq_obj *db_obj = db_info->db_obj;
547 	int r, db_size;
548 
549 	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
550 	if (gobj == NULL) {
551 		drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
552 		return -EINVAL;
553 	}
554 
555 	db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
556 	drm_gem_object_put(gobj);
557 
558 	r = amdgpu_bo_reserve(db_obj->obj, true);
559 	if (r) {
560 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
561 		goto unref_bo;
562 	}
563 
564 	/* Pin the BO before generating the index, unpin in queue destroy */
565 	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
566 	if (r) {
567 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
568 		goto unresv_bo;
569 	}
570 
571 	switch (db_info->queue_type) {
572 	case AMDGPU_HW_IP_GFX:
573 	case AMDGPU_HW_IP_COMPUTE:
574 	case AMDGPU_HW_IP_DMA:
575 		db_size = sizeof(u64);
576 		break;
577 	default:
578 		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
579 			     db_info->queue_type);
580 		r = -EINVAL;
581 		goto unpin_bo;
582 	}
583 
584 	/* Validate doorbell_offset is within the doorbell BO */
585 	if ((u64)db_info->doorbell_offset * db_size + db_size >
586 	    amdgpu_bo_size(db_obj->obj)) {
587 		r = -EINVAL;
588 		goto unpin_bo;
589 	}
590 
591 	index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
592 					     db_info->doorbell_offset, db_size);
593 	drm_dbg_driver(adev_to_drm(uq_mgr->adev),
594 		       "[Usermode queues] doorbell index=%lld\n", index);
595 	amdgpu_bo_unreserve(db_obj->obj);
596 	return index;
597 
598 unpin_bo:
599 	amdgpu_bo_unpin(db_obj->obj);
600 unresv_bo:
601 	amdgpu_bo_unreserve(db_obj->obj);
602 unref_bo:
603 	amdgpu_bo_unref(&db_obj->obj);
604 	return r;
605 }
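/*
 * Worked example of the bounds check above: with a 4 KiB doorbell BO and
 * db_size == sizeof(u64) == 8, doorbell_offset values 0..511 pass, since
 * 511 * 8 + 8 == 4096 == amdgpu_bo_size(db_obj->obj).
 */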
606 
607 static int
608 amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue)
609 {
610 	struct amdgpu_device *adev = uq_mgr->adev;
611 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
612 	struct amdgpu_vm *vm = &fpriv->vm;
613 
614 	int r = 0;
615 
616 	cancel_delayed_work_sync(&uq_mgr->resume_work);
617 
618 	/* Cancel any pending hang detection work and cleanup */
619 	cancel_delayed_work_sync(&queue->hang_detect_work);
620 
621 	r = amdgpu_bo_reserve(vm->root.bo, false);
622 	if (r) {
623 		drm_file_err(uq_mgr->file, "Failed to reserve root bo during userqueue destroy\n");
624 		return r;
625 	}
626 	amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
627 	amdgpu_bo_unreserve(vm->root.bo);
628 
629 	mutex_lock(&uq_mgr->userq_mutex);
630 	amdgpu_userq_wait_for_last_fence(queue);
631 
632 #if defined(CONFIG_DEBUG_FS)
633 	debugfs_remove_recursive(queue->debugfs_queue);
634 #endif
635 	r = amdgpu_userq_unmap_helper(queue);
636 	atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
637 	amdgpu_userq_cleanup(queue);
638 	mutex_unlock(&uq_mgr->userq_mutex);
639 
640 	amdgpu_bo_reserve(queue->db_obj.obj, true);
641 	amdgpu_bo_unpin(queue->db_obj.obj);
642 	amdgpu_bo_unreserve(queue->db_obj.obj);
643 	amdgpu_bo_unref(&queue->db_obj.obj);
644 
645 	amdgpu_bo_reserve(queue->wptr_obj.obj, true);
646 	amdgpu_bo_unpin(queue->wptr_obj.obj);
647 	amdgpu_bo_unreserve(queue->wptr_obj.obj);
648 	amdgpu_bo_unref(&queue->wptr_obj.obj);
649 	kfree(queue);
650 
651 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
652 
653 	return r;
654 }
655 
656 static void amdgpu_userq_kref_destroy(struct kref *kref)
657 {
658 	int r;
659 	struct amdgpu_usermode_queue *queue =
660 		container_of(kref, struct amdgpu_usermode_queue, refcount);
661 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
662 
663 	r = amdgpu_userq_destroy(uq_mgr, queue);
664 	if (r)
665 		drm_file_err(uq_mgr->file, "Failed to destroy usermode queue %d\n", r);
666 }
667 
668 struct amdgpu_usermode_queue *amdgpu_userq_get(struct amdgpu_userq_mgr *uq_mgr, u32 qid)
669 {
670 	struct amdgpu_usermode_queue *queue;
671 
672 	xa_lock(&uq_mgr->userq_xa);
673 	queue = xa_load(&uq_mgr->userq_xa, qid);
674 	if (queue)
675 		kref_get(&queue->refcount);
676 	xa_unlock(&uq_mgr->userq_xa);
677 
678 	return queue;
679 }
680 
681 void amdgpu_userq_put(struct amdgpu_usermode_queue *queue)
682 {
683 	if (queue)
684 		kref_put(&queue->refcount, amdgpu_userq_kref_destroy);
685 }
686 
687 static int amdgpu_userq_priority_permit(struct drm_file *filp,
688 					int priority)
689 {
690 	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
691 		return 0;
692 
693 	if (capable(CAP_SYS_NICE))
694 		return 0;
695 
696 	if (drm_is_current_master(filp))
697 		return 0;
698 
699 	return -EACCES;
700 }
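/*
 * The priority lives in the create flags; a hypothetical userspace request
 * for a high-priority queue would encode it as:
 *
 *	flags |= (AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH <<
 *		  AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT) &
 *		 AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK;
 *
 * which only passes the check above for CAP_SYS_NICE or the DRM master.
 */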
701 
702 static int
703 amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
704 {
705 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
706 	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
707 	struct amdgpu_device *adev = uq_mgr->adev;
708 	const struct amdgpu_userq_funcs *uq_funcs;
709 	struct amdgpu_usermode_queue *queue;
710 	struct amdgpu_db_info db_info;
711 	bool skip_map_queue;
712 	u32 qid;
713 	uint64_t index;
714 	int r = 0;
715 	int priority =
716 		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
717 		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
718 
719 	r = amdgpu_userq_priority_permit(filp, priority);
720 	if (r)
721 		return r;
722 
723 	r = pm_runtime_resume_and_get(adev_to_drm(adev)->dev);
724 	if (r < 0) {
725 		drm_file_err(uq_mgr->file, "pm_runtime_resume_and_get() failed for userqueue create\n");
726 		return r;
727 	}
728 
729 	uq_funcs = adev->userq_funcs[args->in.ip_type];
730 	if (!uq_funcs) {
731 		drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
732 			     args->in.ip_type);
733 		r = -EINVAL;
734 		goto err_pm_runtime;
735 	}
736 
737 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
738 	if (!queue) {
739 		drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
740 		r = -ENOMEM;
741 		goto err_pm_runtime;
742 	}
743 
744 	INIT_LIST_HEAD(&queue->userq_va_list);
745 	queue->doorbell_handle = args->in.doorbell_handle;
746 	queue->queue_type = args->in.ip_type;
747 	queue->vm = &fpriv->vm;
748 	queue->priority = priority;
749 
750 	db_info.queue_type = queue->queue_type;
751 	db_info.doorbell_handle = queue->doorbell_handle;
752 	db_info.db_obj = &queue->db_obj;
753 	db_info.doorbell_offset = args->in.doorbell_offset;
754 
755 	queue->userq_mgr = uq_mgr;
756 
757 	/* Validate the userq virtual addresses */
758 	r = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
759 	if (r)
760 		goto free_queue;
761 
762 	if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) ||
763 	    amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
764 	    amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
765 		r = -EINVAL;
766 		amdgpu_bo_unreserve(fpriv->vm.root.bo);
767 		goto clean_mapping;
768 	}
769 	amdgpu_bo_unreserve(fpriv->vm.root.bo);
770 
771 	/* Convert relative doorbell offset into absolute doorbell index */
772 	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
773 	if (index == (uint64_t)-EINVAL) {
774 		drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
775 		r = -EINVAL;
776 		goto clean_mapping;
777 	}
778 
779 	queue->doorbell_index = index;
780 	mutex_init(&queue->fence_drv_lock);
781 	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
782 	r = amdgpu_userq_fence_driver_alloc(adev, &queue->fence_drv);
783 	if (r) {
784 		drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
785 		goto clean_mapping;
786 	}
787 
788 	r = uq_funcs->mqd_create(queue, &args->in);
789 	if (r) {
790 		drm_file_err(uq_mgr->file, "Failed to create Queue\n");
791 		goto clean_fence_driver;
792 	}
793 
794 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
795 
796 	/* don't map the queue if scheduling is halted */
797 	if (adev->userq_halt_for_enforce_isolation &&
798 	    ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
799 	     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
800 		skip_map_queue = true;
801 	else
802 		skip_map_queue = false;
803 	if (!skip_map_queue) {
804 		r = amdgpu_userq_map_helper(queue);
805 		if (r) {
806 			drm_file_err(uq_mgr->file, "Failed to map Queue\n");
807 			goto clean_mqd;
808 		}
809 	}
810 
811 	/* drop this refcount during queue destroy */
812 	kref_init(&queue->refcount);
813 
814 	/* Wait for mode-1 reset to complete */
815 	down_read(&adev->reset_domain->sem);
816 
817 	r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
818 		     XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
819 	if (r) {
820 		if (!skip_map_queue)
821 			amdgpu_userq_unmap_helper(queue);
822 		r = -ENOMEM;
823 		goto clean_reset_domain;
824 	}
825 
826 	r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
827 	if (r) {
828 		xa_erase(&uq_mgr->userq_xa, qid);
829 		if (!skip_map_queue)
830 			amdgpu_userq_unmap_helper(queue);
831 		goto clean_reset_domain;
832 	}
833 	up_read(&adev->reset_domain->sem);
834 
835 	amdgpu_debugfs_userq_init(filp, queue, qid);
836 	INIT_DELAYED_WORK(&queue->hang_detect_work,
837 			  amdgpu_userq_hang_detect_work);
838 
839 	args->out.queue_id = qid;
840 	atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
841 	mutex_unlock(&uq_mgr->userq_mutex);
842 	return 0;
843 
844 clean_reset_domain:
845 	up_read(&adev->reset_domain->sem);
846 clean_mqd:
847 	mutex_unlock(&uq_mgr->userq_mutex);
848 	uq_funcs->mqd_destroy(queue);
849 clean_fence_driver:
850 	amdgpu_userq_fence_driver_free(queue);
851 clean_mapping:
852 	amdgpu_bo_reserve(fpriv->vm.root.bo, true);
853 	amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
854 	amdgpu_bo_unreserve(fpriv->vm.root.bo);
855 	mutex_destroy(&queue->fence_drv_lock);
856 free_queue:
857 	kfree(queue);
858 err_pm_runtime:
859 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
860 	return r;
861 }
862 
863 static int amdgpu_userq_input_args_validate(struct drm_device *dev,
864 					union drm_amdgpu_userq *args,
865 					struct drm_file *filp)
866 {
867 	struct amdgpu_device *adev = drm_to_adev(dev);
868 
869 	switch (args->in.op) {
870 	case AMDGPU_USERQ_OP_CREATE:
871 		if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
872 				       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
873 			return -EINVAL;
874 		/* Usermode queues are only supported for GFX, Compute and SDMA IPs for now */
875 		if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
876 		    args->in.ip_type != AMDGPU_HW_IP_DMA &&
877 		    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
878 			drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
879 				     args->in.ip_type);
880 			return -EINVAL;
881 		}
882 
883 		if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
884 		    (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
885 		    (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
886 		    !amdgpu_is_tmz(adev)) {
887 			drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
888 			return -EINVAL;
889 		}
890 
891 		if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
892 		    args->in.queue_va == 0 ||
893 		    args->in.queue_size == 0) {
894 			drm_file_err(filp, "invalid userq queue va or size\n");
895 			return -EINVAL;
896 		}
897 
898 		if (!is_power_of_2(args->in.queue_size)) {
899 			drm_file_err(filp, "Queue size must be a power of 2\n");
900 			return -EINVAL;
901 		}
902 
903 		if (args->in.queue_size < AMDGPU_GPU_PAGE_SIZE) {
904 			drm_file_err(filp, "Queue size smaller than AMDGPU_GPU_PAGE_SIZE\n");
905 			return -EINVAL;
906 		}
907 
908 		if (!args->in.wptr_va || !args->in.rptr_va) {
909 			drm_file_err(filp, "invalid userq rptr or wptr va\n");
910 			return -EINVAL;
911 		}
912 		break;
913 	case AMDGPU_USERQ_OP_FREE:
914 		if (args->in.ip_type ||
915 		    args->in.doorbell_handle ||
916 		    args->in.doorbell_offset ||
917 		    args->in.flags ||
918 		    args->in.queue_va ||
919 		    args->in.queue_size ||
920 		    args->in.rptr_va ||
921 		    args->in.wptr_va ||
922 		    args->in.mqd ||
923 		    args->in.mqd_size)
924 			return -EINVAL;
925 		break;
926 	default:
927 		return -EINVAL;
928 	}
929 
930 	return 0;
931 }
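/*
 * In short, a valid AMDGPU_USERQ_OP_CREATE request needs a supported
 * ip_type, a non-zero power-of-two queue_size of at least
 * AMDGPU_GPU_PAGE_SIZE (4 KiB, 8 KiB, ...), and non-zero queue/rptr/wptr
 * VAs; whether those VAs are actually mapped is checked later by
 * amdgpu_userq_input_va_validate() at create time.
 */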
932 
933 bool amdgpu_userq_enabled(struct drm_device *dev)
934 {
935 	struct amdgpu_device *adev = drm_to_adev(dev);
936 	int i;
937 
938 	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
939 		if (adev->userq_funcs[i])
940 			return true;
941 	}
942 
943 	return false;
944 }
945 
946 int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
947 		       struct drm_file *filp)
948 {
949 	union drm_amdgpu_userq *args = data;
950 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
951 	struct amdgpu_usermode_queue *queue;
952 	int r = 0;
953 
954 	if (!amdgpu_userq_enabled(dev))
955 		return -ENOTSUPP;
956 
957 	if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
958 		return -EINVAL;
959 
960 	switch (args->in.op) {
961 	case AMDGPU_USERQ_OP_CREATE:
962 		r = amdgpu_userq_create(filp, args);
963 		if (r)
964 			drm_file_err(filp, "Failed to create usermode queue\n");
965 		break;
966 
967 	case AMDGPU_USERQ_OP_FREE: {
968 		xa_lock(&fpriv->userq_mgr.userq_xa);
969 		queue = __xa_erase(&fpriv->userq_mgr.userq_xa, args->in.queue_id);
970 		xa_unlock(&fpriv->userq_mgr.userq_xa);
971 		if (!queue)
972 			return -ENOENT;
973 
974 		amdgpu_userq_put(queue);
975 		break;
976 	}
977 
978 	default:
979 		drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
980 		return -EINVAL;
981 	}
982 
983 	return r;
984 }
985 
986 static int
987 amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
988 {
989 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
990 	struct amdgpu_vm *vm = &fpriv->vm;
991 	struct amdgpu_usermode_queue *queue;
992 	unsigned long queue_id;
993 	int ret = 0, r;
994 
995 
996 	if (amdgpu_bo_reserve(vm->root.bo, false))
997 		return -EINVAL;
998 
999 	mutex_lock(&uq_mgr->userq_mutex);
1000 	/* Resume all the queues for this process */
1001 	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1002 
1003 		if (!amdgpu_userq_buffer_vas_mapped(queue)) {
1004 			drm_file_err(uq_mgr->file,
1005 				     "trying to restore a queue without a VA mapping\n");
1006 			queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
1007 			continue;
1008 		}
1009 
1010 		r = amdgpu_userq_restore_helper(queue);
1011 		if (r)
1012 			ret = r;
1013 
1014 	}
1015 	mutex_unlock(&uq_mgr->userq_mutex);
1016 	amdgpu_bo_unreserve(vm->root.bo);
1017 
1018 	if (ret)
1019 		drm_file_err(uq_mgr->file,
1020 			     "Failed to map all the queues, restore failed ret=%d\n", ret);
1021 	return ret;
1022 }
1023 
1024 static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
1025 {
1026 	struct ttm_operation_ctx ctx = { false, false };
1027 
1028 	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1029 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1030 }
1031 
1032 /* Handle all BOs on the invalidated list, validate them and update the PTs */
1033 static int
1034 amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
1035 			 struct amdgpu_vm *vm)
1036 {
1037 	struct ttm_operation_ctx ctx = { false, false };
1038 	struct amdgpu_bo_va *bo_va;
1039 	struct amdgpu_bo *bo;
1040 	int ret;
1041 
1042 	spin_lock(&vm->status_lock);
1043 	while (!list_empty(&vm->invalidated)) {
1044 		bo_va = list_first_entry(&vm->invalidated,
1045 					 struct amdgpu_bo_va,
1046 					 base.vm_status);
1047 		spin_unlock(&vm->status_lock);
1048 
1049 		bo = bo_va->base.bo;
1050 		ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
1051 		if (unlikely(ret))
1052 			return ret;
1053 
1054 		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1055 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1056 		if (ret)
1057 			return ret;
1058 
1059 		/* This moves the bo_va to the done list */
1060 		ret = amdgpu_vm_bo_update(adev, bo_va, false);
1061 		if (ret)
1062 			return ret;
1063 
1064 		spin_lock(&vm->status_lock);
1065 	}
1066 	spin_unlock(&vm->status_lock);
1067 
1068 	return 0;
1069 }
1070 
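/*
 * Userptr BOs complicate the validation below: faulting their pages in via
 * amdgpu_ttm_tt_get_user_pages() must not happen under the drm_exec locks,
 * so newly discovered userptr BOs are parked in an xarray of HMM ranges,
 * all locks are dropped, the pages are made present, and the whole
 * lock-and-validate sequence is retried from the retry_lock label.
 */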
1071 /* Make sure the whole VM is ready to be used */
1072 static int
1073 amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
1074 {
1075 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1076 	bool invalidated = false, new_addition = false;
1077 	struct ttm_operation_ctx ctx = { true, false };
1078 	struct amdgpu_device *adev = uq_mgr->adev;
1079 	struct amdgpu_hmm_range *range;
1080 	struct amdgpu_vm *vm = &fpriv->vm;
1081 	unsigned long key, tmp_key;
1082 	struct amdgpu_bo_va *bo_va;
1083 	struct amdgpu_bo *bo;
1084 	struct drm_exec exec;
1085 	struct xarray xa;
1086 	int ret;
1087 
1088 	xa_init(&xa);
1089 
1090 retry_lock:
1091 	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
1092 	drm_exec_until_all_locked(&exec) {
1093 		ret = amdgpu_vm_lock_pd(vm, &exec, 1);
1094 		drm_exec_retry_on_contention(&exec);
1095 		if (unlikely(ret))
1096 			goto unlock_all;
1097 
1098 		ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
1099 		drm_exec_retry_on_contention(&exec);
1100 		if (unlikely(ret))
1101 			goto unlock_all;
1102 
1103 		/* This validates PDs, PTs and per VM BOs */
1104 		ret = amdgpu_vm_validate(adev, vm, NULL,
1105 					 amdgpu_userq_validate_vm,
1106 					 NULL);
1107 		if (unlikely(ret))
1108 			goto unlock_all;
1109 
1110 		/* This locks and validates the remaining evicted BOs */
1111 		ret = amdgpu_userq_bo_validate(adev, &exec, vm);
1112 		drm_exec_retry_on_contention(&exec);
1113 		if (unlikely(ret))
1114 			goto unlock_all;
1115 	}
1116 
1117 	if (invalidated) {
1118 		xa_for_each(&xa, tmp_key, range) {
1119 			bo = range->bo;
1120 			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1121 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1122 			if (ret)
1123 				goto unlock_all;
1124 
1125 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
1126 
1127 			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
1128 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1129 			if (ret)
1130 				goto unlock_all;
1131 		}
1132 		invalidated = false;
1133 	}
1134 
1135 	ret = amdgpu_vm_handle_moved(adev, vm, NULL);
1136 	if (ret)
1137 		goto unlock_all;
1138 
1139 	key = 0;
1140 	/* Validate User Ptr BOs */
1141 	list_for_each_entry(bo_va, &vm->done, base.vm_status) {
1142 		bo = bo_va->base.bo;
1143 		if (!bo)
1144 			continue;
1145 
1146 		if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
1147 			continue;
1148 
1149 		range = xa_load(&xa, key);
1150 		if (range && range->bo != bo) {
1151 			xa_erase(&xa, key);
1152 			amdgpu_hmm_range_free(range);
1153 			range = NULL;
1154 		}
1155 
1156 		if (!range) {
1157 			range = amdgpu_hmm_range_alloc(bo);
1158 			if (!range) {
1159 				ret = -ENOMEM;
1160 				goto unlock_all;
1161 			}
1162 
1163 			xa_store(&xa, key, range, GFP_KERNEL);
1164 			new_addition = true;
1165 		}
1166 		key++;
1167 	}
1168 
1169 	if (new_addition) {
1170 		drm_exec_fini(&exec);
1171 		xa_for_each(&xa, tmp_key, range) {
1172 			if (!range)
1173 				continue;
1174 			bo = range->bo;
1175 			ret = amdgpu_ttm_tt_get_user_pages(bo, range);
1176 			if (ret)
1177 				goto free_ranges;
1178 		}
1179 
1180 		invalidated = true;
1181 		new_addition = false;
1182 		goto retry_lock;
1183 	}
1184 
1185 	ret = amdgpu_vm_update_pdes(adev, vm, false);
1186 	if (ret)
1187 		goto unlock_all;
1188 
1189 	/*
1190 	 * We need to wait for all VM updates to finish before restarting the
1191 	 * queues. Using the done list like that is now ok since everything is
1192 	 * locked in place.
1193 	 */
1194 	list_for_each_entry(bo_va, &vm->done, base.vm_status)
1195 		dma_fence_wait(bo_va->last_pt_update, false);
1196 	dma_fence_wait(vm->last_update, false);
1197 
1198 	ret = amdgpu_evf_mgr_rearm(&fpriv->evf_mgr, &exec);
1199 	if (ret)
1200 		drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
1201 
1202 unlock_all:
1203 	drm_exec_fini(&exec);
1204 free_ranges:
1205 	xa_for_each(&xa, tmp_key, range) {
1206 		if (!range)
1207 			continue;
1208 		bo = range->bo;
1209 		amdgpu_hmm_range_free(range);
1210 	}
1211 	xa_destroy(&xa);
1212 	return ret;
1213 }
1214 
1215 static void amdgpu_userq_restore_worker(struct work_struct *work)
1216 {
1217 	struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
1218 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1219 	struct dma_fence *ev_fence;
1220 	int ret;
1221 
1222 	ev_fence = amdgpu_evf_mgr_get_fence(&fpriv->evf_mgr);
1223 	if (!dma_fence_is_signaled(ev_fence))
1224 		goto put_fence;
1225 
1226 	ret = amdgpu_userq_vm_validate(uq_mgr);
1227 	if (ret) {
1228 		drm_file_err(uq_mgr->file, "Failed to validate BOs to restore ret=%d\n", ret);
1229 		goto put_fence;
1230 	}
1231 
1232 	amdgpu_userq_restore_all(uq_mgr);
1233 
1234 put_fence:
1235 	dma_fence_put(ev_fence);
1236 }
1237 
1238 static int
1239 amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
1240 {
1241 	struct amdgpu_usermode_queue *queue;
1242 	unsigned long queue_id;
1243 	int ret = 0, r;
1244 
1245 	/* Try to unmap all the queues in this process ctx */
1246 	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1247 		r = amdgpu_userq_preempt_helper(queue);
1248 		if (r)
1249 			ret = r;
1250 	}
1251 
1252 	if (ret) {
1253 		drm_file_err(uq_mgr->file,
1254 			     "Couldn't unmap all the queues, eviction failed ret=%d\n", ret);
1255 		amdgpu_userq_detect_and_reset_queues(uq_mgr);
1256 	}
1257 	return ret;
1258 }
1259 
1260 void amdgpu_userq_reset_work(struct work_struct *work)
1261 {
1262 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
1263 						  userq_reset_work);
1264 	struct amdgpu_reset_context reset_context;
1265 
1266 	memset(&reset_context, 0, sizeof(reset_context));
1267 
1268 	reset_context.method = AMD_RESET_METHOD_NONE;
1269 	reset_context.reset_req_dev = adev;
1270 	reset_context.src = AMDGPU_RESET_SRC_USERQ;
1271 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
1272 	/*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/
1273 
1274 	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
1275 }
1276 
1277 static void
1278 amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
1279 {
1280 	struct amdgpu_usermode_queue *queue;
1281 	unsigned long queue_id;
1282 
1283 	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1284 		struct dma_fence *f = queue->last_fence;
1285 
1286 		if (!f)
1287 			continue;
1288 
1289 		dma_fence_wait(f, false);
1290 	}
1291 }
1292 
1293 void
1294 amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr)
1295 {
1296 	/* Wait for any pending userqueue fence work to finish */
1297 	amdgpu_userq_wait_for_signal(uq_mgr);
1298 	amdgpu_userq_evict_all(uq_mgr);
1299 }
1300 
1301 int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
1302 			  struct amdgpu_device *adev)
1303 {
1304 	mutex_init(&userq_mgr->userq_mutex);
1305 	xa_init_flags(&userq_mgr->userq_xa, XA_FLAGS_ALLOC);
1306 	userq_mgr->adev = adev;
1307 	userq_mgr->file = file_priv;
1308 
1309 	INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
1310 	return 0;
1311 }
1312 
1313 void amdgpu_userq_mgr_cancel_resume(struct amdgpu_userq_mgr *userq_mgr)
1314 {
1315 	cancel_delayed_work_sync(&userq_mgr->resume_work);
1316 }
1317 
1318 void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
1319 {
1320 	struct amdgpu_usermode_queue *queue;
1321 	unsigned long queue_id = 0;
1322 
1323 	for (;;) {
1324 		xa_lock(&userq_mgr->userq_xa);
1325 		queue = xa_find(&userq_mgr->userq_xa, &queue_id, ULONG_MAX,
1326 				XA_PRESENT);
1327 		if (queue)
1328 			__xa_erase(&userq_mgr->userq_xa, queue_id);
1329 		xa_unlock(&userq_mgr->userq_xa);
1330 
1331 		if (!queue)
1332 			break;
1333 
1334 		amdgpu_userq_put(queue);
1335 	}
1336 
1337 	xa_destroy(&userq_mgr->userq_xa);
1338 	mutex_destroy(&userq_mgr->userq_mutex);
1339 }
1340 
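/*
 * For S0ix the queues are only preempted so their state stays resident,
 * while a full suspend unmaps them entirely; amdgpu_userq_resume() mirrors
 * this with restore vs. map.
 */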
1341 int amdgpu_userq_suspend(struct amdgpu_device *adev)
1342 {
1343 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1344 	struct amdgpu_usermode_queue *queue;
1345 	struct amdgpu_userq_mgr *uqm;
1346 	unsigned long queue_id;
1347 	int r;
1348 
1349 	if (!ip_mask)
1350 		return 0;
1351 
1352 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1353 		uqm = queue->userq_mgr;
1354 		cancel_delayed_work_sync(&uqm->resume_work);
1355 		guard(mutex)(&uqm->userq_mutex);
1356 		if (adev->in_s0ix)
1357 			r = amdgpu_userq_preempt_helper(queue);
1358 		else
1359 			r = amdgpu_userq_unmap_helper(queue);
1360 		if (r)
1361 			return r;
1362 	}
1363 	return 0;
1364 }
1365 
1366 int amdgpu_userq_resume(struct amdgpu_device *adev)
1367 {
1368 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1369 	struct amdgpu_usermode_queue *queue;
1370 	struct amdgpu_userq_mgr *uqm;
1371 	unsigned long queue_id;
1372 	int r;
1373 
1374 	if (!ip_mask)
1375 		return 0;
1376 
1377 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1378 		uqm = queue->userq_mgr;
1379 		guard(mutex)(&uqm->userq_mutex);
1380 		if (adev->in_s0ix)
1381 			r = amdgpu_userq_restore_helper(queue);
1382 		else
1383 			r = amdgpu_userq_map_helper(queue);
1384 		if (r)
1385 			return r;
1386 	}
1387 
1388 	return 0;
1389 }
1390 
1391 int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
1392 						  u32 idx)
1393 {
1394 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1395 	struct amdgpu_usermode_queue *queue;
1396 	struct amdgpu_userq_mgr *uqm;
1397 	unsigned long queue_id;
1398 	int ret = 0, r;
1399 
1400 	/* only need to stop gfx/compute */
1401 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
1402 		return 0;
1403 
1404 	if (adev->userq_halt_for_enforce_isolation)
1405 		dev_warn(adev->dev, "userq scheduling already stopped!\n");
1406 	adev->userq_halt_for_enforce_isolation = true;
1407 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1408 		uqm = queue->userq_mgr;
1409 		cancel_delayed_work_sync(&uqm->resume_work);
1410 		mutex_lock(&uqm->userq_mutex);
1411 		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1412 		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1413 		    (queue->xcp_id == idx)) {
1414 			r = amdgpu_userq_preempt_helper(queue);
1415 			if (r)
1416 				ret = r;
1417 		}
1418 		mutex_unlock(&uqm->userq_mutex);
1419 	}
1420 
1421 	return ret;
1422 }
1423 
1424 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
1425 						   u32 idx)
1426 {
1427 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1428 	struct amdgpu_usermode_queue *queue;
1429 	struct amdgpu_userq_mgr *uqm;
1430 	unsigned long queue_id;
1431 	int ret = 0, r;
1432 
1433 	/* only need to start gfx/compute */
1434 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
1435 		return 0;
1436 
1437 	if (!adev->userq_halt_for_enforce_isolation)
1438 		dev_warn(adev->dev, "userq scheduling already started!\n");
1439 
1440 	adev->userq_halt_for_enforce_isolation = false;
1441 
1442 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1443 		uqm = queue->userq_mgr;
1444 		mutex_lock(&uqm->userq_mutex);
1445 		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1446 		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1447 		    (queue->xcp_id == idx)) {
1448 			r = amdgpu_userq_restore_helper(queue);
1449 			if (r)
1450 				ret = r;
1451 		}
1452 		mutex_unlock(&uqm->userq_mutex);
1453 	}
1454 
1455 	return ret;
1456 }
1457 
1458 void amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
1459 					struct amdgpu_bo_va_mapping *mapping,
1460 					uint64_t saddr)
1461 {
1462 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1463 	struct amdgpu_bo_va *bo_va = mapping->bo_va;
1464 	struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
1465 
1466 	if (!ip_mask)
1467 		return;
1468 
1469 	dev_warn_once(adev->dev, "unmapping an in-use userq va:%llx\n", saddr);
1470 	/*
1471 	 * The userq VA mapping reservation should include the eviction fence.
1472 	 * If the eviction fence cannot signal successfully during unmapping,
1473 	 * the driver warns to flag this improper unmap of the userq VA.
1474 	 * Note: the eviction fence may be attached to different BOs, and this
1475 	 * unmap only covers one kind of userq VA, so at this point the
1476 	 * eviction fence is assumed to always be unsignaled.
1477 	 */
1478 	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
1479 			      false, MAX_SCHEDULE_TIMEOUT);
1480 }
1481 
1482 void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
1483 {
1484 	const struct amdgpu_userq_funcs *userq_funcs;
1485 	struct amdgpu_usermode_queue *queue;
1486 	unsigned long queue_id;
1487 
1488 	/* TODO: We probably need a new lock for the queue state */
1489 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1490 		if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
1491 			continue;
1492 
1493 		userq_funcs = adev->userq_funcs[queue->queue_type];
1494 		userq_funcs->unmap(queue);
1495 		/* just mark all queues as hung at this point.
1496 		 * if unmap succeeds, we could map again
1497 		 * in amdgpu_userq_post_reset() if vram is not lost
1498 		 */
1499 		queue->state = AMDGPU_USERQ_STATE_HUNG;
1500 		amdgpu_userq_fence_driver_force_completion(queue);
1501 	}
1502 }
1503 
1504 int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost)
1505 {
1506 	/* if any queue state is AMDGPU_USERQ_STATE_UNMAPPED
1507 	 * at this point, we should be able to map it again
1508 	 * and continue if vram is not lost.
1509 	 */
1510 	struct amdgpu_usermode_queue *queue;
1511 	const struct amdgpu_userq_funcs *userq_funcs;
1512 	unsigned long queue_id;
1513 	int r = 0;
1514 
1515 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1516 		if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
1517 			userq_funcs = adev->userq_funcs[queue->queue_type];
1518 			/* Re-map queue */
1519 			r = userq_funcs->map(queue);
1520 			if (r) {
1521 				dev_err(adev->dev, "Failed to remap queue %lu\n", queue_id);
1522 				continue;
1523 			}
1524 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
1525 		}
1526 	}
1527 
1528 	return r;
1529 }
1530