xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c (revision 92c4c9fdc838d3b41a996bb700ea64b9e78fc7ea)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <drm/drm_auth.h>
26 #include <drm/drm_exec.h>
27 #include <linux/pm_runtime.h>
28 #include <drm/drm_drv.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_reset.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_userq.h"
34 #include "amdgpu_hmm.h"
35 #include "amdgpu_userq_fence.h"
36 
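/*
 * Build a bitmask of hardware IP types (AMDGPU_HW_IP_*) that have user
 * queue functions registered, i.e. that support usermode queues.
 */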
37 u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
38 {
39 	int i;
40 	u32 userq_ip_mask = 0;
41 
42 	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
43 		if (adev->userq_funcs[i])
44 			userq_ip_mask |= (1 << i);
45 	}
46 
47 	return userq_ip_mask;
48 }
49 
50 static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev,
51 				enum amdgpu_ring_type ring_type, int reset_type)
52 {
53 
54 	if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX)
55 		return false;
56 
57 	switch (ring_type) {
58 	case AMDGPU_RING_TYPE_GFX:
59 		if (adev->gfx.gfx_supported_reset & reset_type)
60 			return true;
61 		break;
62 	case AMDGPU_RING_TYPE_COMPUTE:
63 		if (adev->gfx.compute_supported_reset & reset_type)
64 			return true;
65 		break;
66 	case AMDGPU_RING_TYPE_SDMA:
67 		if (adev->sdma.supported_reset & reset_type)
68 			return true;
69 		break;
70 	case AMDGPU_RING_TYPE_VCN_DEC:
71 	case AMDGPU_RING_TYPE_VCN_ENC:
72 		if (adev->vcn.supported_reset & reset_type)
73 			return true;
74 		break;
75 	case AMDGPU_RING_TYPE_VCN_JPEG:
76 		if (adev->jpeg.supported_reset & reset_type)
77 			return true;
78 		break;
79 	default:
80 		break;
81 	}
82 	return false;
83 }
84 
85 static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev)
86 {
87 	if (amdgpu_device_should_recover_gpu(adev)) {
88 		amdgpu_reset_domain_schedule(adev->reset_domain,
89 					     &adev->userq_reset_work);
90 		/* Wait for the reset job to complete */
91 		flush_work(&adev->userq_reset_work);
92 	}
93 }
94 
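/*
 * Walk the compute, GFX and SDMA queue types and, where per-queue reset is
 * supported and this manager has active queues of that type, call the IP's
 * detect_and_reset handler. If a handler fails, escalate to a full GPU
 * reset. The caller must hold uq_mgr->userq_mutex.
 */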
95 static int
96 amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr)
97 {
98 	struct amdgpu_device *adev = uq_mgr->adev;
99 	const int queue_types[] = {
100 		AMDGPU_RING_TYPE_COMPUTE,
101 		AMDGPU_RING_TYPE_GFX,
102 		AMDGPU_RING_TYPE_SDMA
103 	};
104 	const int num_queue_types = ARRAY_SIZE(queue_types);
105 	bool gpu_reset = false;
106 	int r = 0;
107 	int i;
108 
109 	/* Warn if the process's userq mutex is not held */
110 	WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex));
111 
112 	if (unlikely(adev->debug_disable_gpu_ring_reset)) {
113 		dev_err(adev->dev, "userq reset disabled by debug mask\n");
114 		return 0;
115 	}
116 
117 	/*
118 	 * If GPU recovery feature is disabled system-wide,
119 	 * skip all reset detection logic
120 	 */
121 	if (!amdgpu_gpu_recovery)
122 		return 0;
123 
124 	/*
125 	 * Iterate through all queue types to detect and reset problematic queues
126 	 * Process each queue type in the defined order
127 	 */
128 	for (i = 0; i < num_queue_types; i++) {
129 		int ring_type = queue_types[i];
130 		const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type];
131 
132 		if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE))
133 			continue;
134 
135 		if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 &&
136 		    funcs && funcs->detect_and_reset) {
137 			r = funcs->detect_and_reset(adev, ring_type);
138 			if (r) {
139 				gpu_reset = true;
140 				break;
141 			}
142 		}
143 	}
144 
145 	if (gpu_reset)
146 		amdgpu_userq_gpu_reset(adev);
147 
148 	return r;
149 }
150 
151 static void amdgpu_userq_hang_detect_work(struct work_struct *work)
152 {
153 	struct amdgpu_usermode_queue *queue = container_of(work,
154 							  struct amdgpu_usermode_queue,
155 							  hang_detect_work.work);
156 	struct dma_fence *fence;
157 	struct amdgpu_userq_mgr *uq_mgr;
158 
159 	if (!queue->userq_mgr)
160 		return;
161 
162 	uq_mgr = queue->userq_mgr;
163 	fence = READ_ONCE(queue->hang_detect_fence);
164 	/* Fence already signaled – no action needed */
165 	if (!fence || dma_fence_is_signaled(fence))
166 		return;
167 
168 	mutex_lock(&uq_mgr->userq_mutex);
169 	amdgpu_userq_detect_and_reset_queues(uq_mgr);
170 	mutex_unlock(&uq_mgr->userq_mutex);
171 }
172 
173 /*
174  * Start hang detection for a user queue fence. A delayed work will be scheduled
175  * to check if the fence is still pending after the timeout period.
176  */
177 void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue)
178 {
179 	struct amdgpu_device *adev;
180 	unsigned long timeout_ms;
181 
182 	if (!queue || !queue->userq_mgr || !queue->userq_mgr->adev)
183 		return;
184 
185 	adev = queue->userq_mgr->adev;
186 	/* Determine timeout based on queue type */
187 	switch (queue->queue_type) {
188 	case AMDGPU_RING_TYPE_GFX:
189 		timeout_ms = adev->gfx_timeout;
190 		break;
191 	case AMDGPU_RING_TYPE_COMPUTE:
192 		timeout_ms = adev->compute_timeout;
193 		break;
194 	case AMDGPU_RING_TYPE_SDMA:
195 		timeout_ms = adev->sdma_timeout;
196 		break;
197 	default:
198 		timeout_ms = adev->gfx_timeout;
199 		break;
200 	}
201 
202 	/* Store the fence to monitor and schedule hang detection */
203 	WRITE_ONCE(queue->hang_detect_fence, queue->last_fence);
204 	schedule_delayed_work(&queue->hang_detect_work,
205 		     msecs_to_jiffies(timeout_ms));
206 }
207 
208 static void amdgpu_userq_init_hang_detect_work(struct amdgpu_usermode_queue *queue)
209 {
210 	INIT_DELAYED_WORK(&queue->hang_detect_work, amdgpu_userq_hang_detect_work);
211 	queue->hang_detect_fence = NULL;
212 }
213 
214 static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
215 					   struct amdgpu_bo_va_mapping *va_map, u64 addr)
216 {
217 	struct amdgpu_userq_va_cursor *va_cursor;
218 	struct userq_va_list;
219 
220 	va_cursor = kzalloc_obj(*va_cursor);
221 	if (!va_cursor)
222 		return -ENOMEM;
223 
224 	INIT_LIST_HEAD(&va_cursor->list);
225 	va_cursor->gpu_addr = addr;
226 	atomic_set(&va_map->bo_va->userq_va_mapped, 1);
227 	list_add(&va_cursor->list, &queue->userq_va_list);
228 
229 	return 0;
230 }
231 
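/*
 * Check that the GPU VA range [addr, addr + expected_size) is fully covered
 * by an existing mapping in the queue's VM and, if so, record the address on
 * the queue's userq_va_list. The caller must hold the reservation of the VM
 * root BO.
 */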
232 int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
233 				   struct amdgpu_usermode_queue *queue,
234 				   u64 addr, u64 expected_size)
235 {
236 	struct amdgpu_bo_va_mapping *va_map;
237 	struct amdgpu_vm *vm = queue->vm;
238 	u64 user_addr;
239 	u64 size;
240 	int r = 0;
241 
242 	/* Caller must hold vm->root.bo reservation */
243 	dma_resv_assert_held(queue->vm->root.bo->tbo.base.resv);
244 
245 	user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
246 	size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
247 
248 	va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
249 	if (!va_map) {
250 		r = -EINVAL;
251 		goto out_err;
252 	}
253 	/* Only validate that the userq buffer resides within the VM mapping range */
254 	if (user_addr >= va_map->start &&
255 	    va_map->last - user_addr + 1 >= size) {
256 		amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
257 		return 0;
258 	}
259 
260 	r = -EINVAL;
261 out_err:
262 	return r;
263 }
264 
265 static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
266 {
267 	struct amdgpu_bo_va_mapping *mapping;
268 	bool r;
269 
270 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
271 
272 	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
273 	if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
274 		r = true;
275 	else
276 		r = false;
277 
278 	return r;
279 }
280 
281 static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
282 {
283 	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
284 	int r = 0;
285 
286 	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
287 		r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
288 		dev_dbg(queue->userq_mgr->adev->dev,
289 			"validate the userq mapping:%p va:%llx r:%d\n",
290 			queue, va_cursor->gpu_addr, r);
291 	}
292 
293 	if (r != 0)
294 		return true;
295 
296 	return false;
297 }
298 
299 static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
300 					    struct amdgpu_userq_va_cursor *va_cursor)
301 {
302 	atomic_set(&mapping->bo_va->userq_va_mapped, 0);
303 	list_del(&va_cursor->list);
304 	kfree(va_cursor);
305 }
306 
307 static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
308 						struct amdgpu_usermode_queue *queue)
309 {
310 	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
311 	struct amdgpu_bo_va_mapping *mapping;
312 
313 	/* Caller must hold vm->root.bo reservation */
314 	dma_resv_assert_held(queue->vm->root.bo->tbo.base.resv);
315 
316 	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
317 		mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
318 		if (!mapping) {
319 			return -EINVAL;
320 		}
321 		dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
322 			queue, va_cursor->gpu_addr);
323 		amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
324 	}
325 
326 	return 0;
327 }
328 
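/*
 * Queue state helpers: a queue moves between AMDGPU_USERQ_STATE_UNMAPPED,
 * _MAPPED and _PREEMPTED through the map/unmap/preempt/restore callbacks of
 * the IP specific userq_funcs. A callback failure parks the queue in
 * AMDGPU_USERQ_STATE_HUNG and, for the preempt/unmap/map paths, triggers
 * hang detection on the manager's remaining queues.
 */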
329 static int amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue *queue)
330 {
331 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
332 	struct amdgpu_device *adev = uq_mgr->adev;
333 	const struct amdgpu_userq_funcs *userq_funcs =
334 		adev->userq_funcs[queue->queue_type];
335 	bool found_hung_queue = false;
336 	int r = 0;
337 
338 	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
339 		r = userq_funcs->preempt(queue);
340 		if (r) {
341 			queue->state = AMDGPU_USERQ_STATE_HUNG;
342 			found_hung_queue = true;
343 		} else {
344 			queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
345 		}
346 	}
347 
348 	if (found_hung_queue)
349 		amdgpu_userq_detect_and_reset_queues(uq_mgr);
350 
351 	return r;
352 }
353 
354 static int amdgpu_userq_restore_helper(struct amdgpu_usermode_queue *queue)
355 {
356 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
357 	struct amdgpu_device *adev = uq_mgr->adev;
358 	const struct amdgpu_userq_funcs *userq_funcs =
359 		adev->userq_funcs[queue->queue_type];
360 	int r = 0;
361 
362 	if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
363 		r = userq_funcs->restore(queue);
364 		if (r) {
365 			queue->state = AMDGPU_USERQ_STATE_HUNG;
366 		} else {
367 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
368 		}
369 	}
370 
371 	return r;
372 }
373 
374 static int amdgpu_userq_unmap_helper(struct amdgpu_usermode_queue *queue)
375 {
376 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
377 	struct amdgpu_device *adev = uq_mgr->adev;
378 	const struct amdgpu_userq_funcs *userq_funcs =
379 		adev->userq_funcs[queue->queue_type];
380 	bool found_hung_queue = false;
381 	int r = 0;
382 
383 	if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
384 		(queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
385 		r = userq_funcs->unmap(queue);
386 		if (r) {
387 			queue->state = AMDGPU_USERQ_STATE_HUNG;
388 			found_hung_queue = true;
389 		} else {
390 			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
391 		}
392 	}
393 
394 	if (found_hung_queue)
395 		amdgpu_userq_detect_and_reset_queues(uq_mgr);
396 
397 	return r;
398 }
399 
400 static int amdgpu_userq_map_helper(struct amdgpu_usermode_queue *queue)
401 {
402 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
403 	struct amdgpu_device *adev = uq_mgr->adev;
404 	const struct amdgpu_userq_funcs *userq_funcs =
405 		adev->userq_funcs[queue->queue_type];
406 	int r = 0;
407 
408 	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
409 		r = userq_funcs->map(queue);
410 		if (r) {
411 			queue->state = AMDGPU_USERQ_STATE_HUNG;
412 			amdgpu_userq_detect_and_reset_queues(uq_mgr);
413 		} else {
414 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
415 		}
416 	}
417 
418 	return r;
419 }
420 
421 static void amdgpu_userq_wait_for_last_fence(struct amdgpu_usermode_queue *queue)
422 {
423 	struct dma_fence *f = queue->last_fence;
424 
425 	if (!f)
426 		return;
427 
428 	dma_fence_wait(f, false);
429 }
430 
431 static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue)
432 {
433 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
434 	struct amdgpu_device *adev = uq_mgr->adev;
435 	const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
436 
437 	/* Wait for mode-1 reset to complete */
438 	down_read(&adev->reset_domain->sem);
439 
440 	uq_funcs->mqd_destroy(queue);
441 	/* Use interrupt-safe locking since IRQ handlers may access these XArrays */
442 	xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
443 	amdgpu_userq_fence_driver_free(queue);
444 	queue->fence_drv = NULL;
445 	queue->userq_mgr = NULL;
446 	list_del(&queue->userq_va_list);
447 
448 	up_read(&adev->reset_domain->sem);
449 }
450 
451 /**
452  * amdgpu_userq_ensure_ev_fence - ensure a valid, unsignaled eviction fence exists
453  * @uq_mgr: the usermode queue manager for this process
454  * @evf_mgr: the eviction fence manager to check and rearm
455  *
456  * Ensures that a valid and not yet signaled eviction fence is attached to the
457  * usermode queue before any queue operations proceed. If the current fence has
458  * already signaled, a new eviction fence is rearmed in its place.
459  */
460 void
461 amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
462 			     struct amdgpu_eviction_fence_mgr *evf_mgr)
463 {
464 	struct dma_fence *ev_fence;
465 
466 retry:
467 	/* Flush any pending resume work to create ev_fence */
468 	flush_delayed_work(&uq_mgr->resume_work);
469 
470 	mutex_lock(&uq_mgr->userq_mutex);
471 	ev_fence = amdgpu_evf_mgr_get_fence(evf_mgr);
472 	if (dma_fence_is_signaled(ev_fence)) {
473 		dma_fence_put(ev_fence);
474 		mutex_unlock(&uq_mgr->userq_mutex);
475 		/*
476 		 * Looks like there was no pending resume work,
477 		 * add one now to create a valid eviction fence
478 		 */
479 		schedule_delayed_work(&uq_mgr->resume_work, 0);
480 		goto retry;
481 	}
482 	dma_fence_put(ev_fence);
483 }
484 
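/*
 * Allocate a kernel-owned BO of @size bytes in GTT for queue metadata, bind
 * it in GART, kmap it for CPU access and record the GPU and CPU addresses in
 * @userq_obj. The buffer contents are zeroed before returning.
 */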
485 int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
486 			       struct amdgpu_userq_obj *userq_obj,
487 			       int size)
488 {
489 	struct amdgpu_device *adev = uq_mgr->adev;
490 	struct amdgpu_bo_param bp;
491 	int r;
492 
493 	memset(&bp, 0, sizeof(bp));
494 	bp.byte_align = PAGE_SIZE;
495 	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
496 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
497 		   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
498 	bp.type = ttm_bo_type_kernel;
499 	bp.size = size;
500 	bp.resv = NULL;
501 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
502 
503 	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
504 	if (r) {
505 		drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
506 		return r;
507 	}
508 
509 	r = amdgpu_bo_reserve(userq_obj->obj, true);
510 	if (r) {
511 		drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
512 		goto free_obj;
513 	}
514 
515 	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
516 	if (r) {
517 		drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
518 		goto unresv;
519 	}
520 
521 	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
522 	if (r) {
523 		drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
524 		goto unresv;
525 	}
526 
527 	userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
528 	amdgpu_bo_unreserve(userq_obj->obj);
529 	memset(userq_obj->cpu_ptr, 0, size);
530 	return 0;
531 
532 unresv:
533 	amdgpu_bo_unreserve(userq_obj->obj);
534 
535 free_obj:
536 	amdgpu_bo_unref(&userq_obj->obj);
537 	return r;
538 }
539 
540 void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
541 				 struct amdgpu_userq_obj *userq_obj)
542 {
543 	amdgpu_bo_kunmap(userq_obj->obj);
544 	amdgpu_bo_unref(&userq_obj->obj);
545 }
546 
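/*
 * Resolve the doorbell GEM handle passed by userspace, pin the doorbell BO
 * in the doorbell domain, validate that the requested offset lies within the
 * BO, and return the absolute doorbell index on the doorbell BAR. On error a
 * negative errno is returned (cast to uint64_t); on success the BO stays
 * pinned until queue destruction.
 */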
547 uint64_t
548 amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
549 				struct amdgpu_db_info *db_info,
550 				struct drm_file *filp)
551 {
552 	uint64_t index;
553 	struct drm_gem_object *gobj;
554 	struct amdgpu_userq_obj *db_obj = db_info->db_obj;
555 	int r, db_size;
556 
557 	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
558 	if (gobj == NULL) {
559 		drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
560 		return -EINVAL;
561 	}
562 
563 	db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
564 	drm_gem_object_put(gobj);
565 
566 	r = amdgpu_bo_reserve(db_obj->obj, true);
567 	if (r) {
568 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
569 		goto unref_bo;
570 	}
571 
572 	/* Pin the BO before generating the index, unpin in queue destroy */
573 	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
574 	if (r) {
575 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
576 		goto unresv_bo;
577 	}
578 
579 	switch (db_info->queue_type) {
580 	case AMDGPU_HW_IP_GFX:
581 	case AMDGPU_HW_IP_COMPUTE:
582 	case AMDGPU_HW_IP_DMA:
583 		db_size = sizeof(u64);
584 		break;
585 	default:
586 		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
587 			     db_info->queue_type);
588 		r = -EINVAL;
589 		goto unpin_bo;
590 	}
591 
592 	/* Validate doorbell_offset is within the doorbell BO */
593 	if ((u64)db_info->doorbell_offset * db_size + db_size >
594 	    amdgpu_bo_size(db_obj->obj)) {
595 		r = -EINVAL;
596 		goto unpin_bo;
597 	}
598 
599 	index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
600 					     db_info->doorbell_offset, db_size);
601 	drm_dbg_driver(adev_to_drm(uq_mgr->adev),
602 		       "[Usermode queues] doorbell index=%lld\n", index);
603 	amdgpu_bo_unreserve(db_obj->obj);
604 	return index;
605 
606 unpin_bo:
607 	amdgpu_bo_unpin(db_obj->obj);
608 unresv_bo:
609 	amdgpu_bo_unreserve(db_obj->obj);
610 unref_bo:
611 	amdgpu_bo_unref(&db_obj->obj);
612 	return r;
613 }
614 
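/*
 * Tear down a usermode queue: cancel pending resume and hang-detection work,
 * drop the recorded VA mappings, wait for the queue's last fence, unmap the
 * queue from HW, release the doorbell and wptr BOs and finally drop the
 * runtime PM reference taken at creation time.
 */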
615 static int
616 amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue)
617 {
618 	struct amdgpu_device *adev = uq_mgr->adev;
619 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
620 	struct amdgpu_vm *vm = &fpriv->vm;
621 
622 	int r = 0;
623 
624 	cancel_delayed_work_sync(&uq_mgr->resume_work);
625 
626 	/* Cancel any pending hang detection work and cleanup */
627 	cancel_delayed_work_sync(&queue->hang_detect_work);
628 
629 	r = amdgpu_bo_reserve(vm->root.bo, false);
630 	if (r) {
631 		drm_file_err(uq_mgr->file, "Failed to reserve root bo during userqueue destroy\n");
632 		return r;
633 	}
634 	amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
635 	amdgpu_bo_unreserve(vm->root.bo);
636 
637 	mutex_lock(&uq_mgr->userq_mutex);
638 	queue->hang_detect_fence = NULL;
639 	amdgpu_userq_wait_for_last_fence(queue);
640 
641 #if defined(CONFIG_DEBUG_FS)
642 	debugfs_remove_recursive(queue->debugfs_queue);
643 #endif
644 	amdgpu_userq_detect_and_reset_queues(uq_mgr);
645 	r = amdgpu_userq_unmap_helper(queue);
646 	/* TODO: a userq HW unmap error requires a GPU reset */
647 	if (r) {
648 		drm_warn(adev_to_drm(uq_mgr->adev), "destroying a userq that is still mapped in HW\n");
649 		queue->state = AMDGPU_USERQ_STATE_HUNG;
650 	}
651 
652 	atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
653 	amdgpu_userq_cleanup(queue);
654 	mutex_unlock(&uq_mgr->userq_mutex);
655 
656 	amdgpu_bo_reserve(queue->db_obj.obj, true);
657 	amdgpu_bo_unpin(queue->db_obj.obj);
658 	amdgpu_bo_unreserve(queue->db_obj.obj);
659 	amdgpu_bo_unref(&queue->db_obj.obj);
660 
661 	amdgpu_bo_reserve(queue->wptr_obj.obj, true);
662 	amdgpu_bo_unpin(queue->wptr_obj.obj);
663 	amdgpu_bo_unreserve(queue->wptr_obj.obj);
664 	amdgpu_bo_unref(&queue->wptr_obj.obj);
665 	kfree(queue);
666 
667 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
668 
669 	return r;
670 }
671 
672 static void amdgpu_userq_kref_destroy(struct kref *kref)
673 {
674 	int r;
675 	struct amdgpu_usermode_queue *queue =
676 		container_of(kref, struct amdgpu_usermode_queue, refcount);
677 	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
678 
679 	r = amdgpu_userq_destroy(uq_mgr, queue);
680 	if (r)
681 		drm_file_err(uq_mgr->file, "Failed to destroy usermode queue %d\n", r);
682 }
683 
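/*
 * Look up a queue by its id under the manager's xarray lock and take a
 * reference on it. amdgpu_userq_put() drops the reference and destroys the
 * queue once the last reference is gone.
 */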
684 struct amdgpu_usermode_queue *amdgpu_userq_get(struct amdgpu_userq_mgr *uq_mgr, u32 qid)
685 {
686 	struct amdgpu_usermode_queue *queue;
687 
688 	xa_lock(&uq_mgr->userq_xa);
689 	queue = xa_load(&uq_mgr->userq_xa, qid);
690 	if (queue)
691 		kref_get(&queue->refcount);
692 	xa_unlock(&uq_mgr->userq_xa);
693 
694 	return queue;
695 }
696 
697 void amdgpu_userq_put(struct amdgpu_usermode_queue *queue)
698 {
699 	if (queue)
700 		kref_put(&queue->refcount, amdgpu_userq_kref_destroy);
701 }
702 
703 static int amdgpu_userq_priority_permit(struct drm_file *filp,
704 					int priority)
705 {
706 	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
707 		return 0;
708 
709 	if (capable(CAP_SYS_NICE))
710 		return 0;
711 
712 	if (drm_is_current_master(filp))
713 		return 0;
714 
715 	return -EACCES;
716 }
717 
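/*
 * Create a usermode queue for the calling process: check priority
 * permissions, validate the queue/rptr/wptr VAs against the process VM,
 * resolve the doorbell index, set up the fence driver and MQD, map the
 * queue to HW (unless scheduling is halted for enforce isolation) and
 * publish it in the per-process and per-device xarrays.
 */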
718 static int
719 amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
720 {
721 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
722 	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
723 	struct amdgpu_device *adev = uq_mgr->adev;
724 	const struct amdgpu_userq_funcs *uq_funcs;
725 	struct amdgpu_usermode_queue *queue;
726 	struct amdgpu_db_info db_info;
727 	bool skip_map_queue;
728 	u32 qid;
729 	uint64_t index;
730 	int r = 0;
731 	int priority =
732 		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
733 		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
734 
735 	r = amdgpu_userq_priority_permit(filp, priority);
736 	if (r)
737 		return r;
738 
739 	r = pm_runtime_resume_and_get(adev_to_drm(adev)->dev);
740 	if (r < 0) {
741 		drm_file_err(uq_mgr->file, "pm_runtime_resume_and_get() failed for userqueue create\n");
742 		return r;
743 	}
744 
745 	uq_funcs = adev->userq_funcs[args->in.ip_type];
746 	if (!uq_funcs) {
747 		drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
748 			     args->in.ip_type);
749 		r = -EINVAL;
750 		goto err_pm_runtime;
751 	}
752 
753 	queue = kzalloc_obj(struct amdgpu_usermode_queue);
754 	if (!queue) {
755 		drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
756 		r = -ENOMEM;
757 		goto err_pm_runtime;
758 	}
759 
760 	INIT_LIST_HEAD(&queue->userq_va_list);
761 	queue->doorbell_handle = args->in.doorbell_handle;
762 	queue->queue_type = args->in.ip_type;
763 	queue->vm = &fpriv->vm;
764 	queue->priority = priority;
765 
766 	db_info.queue_type = queue->queue_type;
767 	db_info.doorbell_handle = queue->doorbell_handle;
768 	db_info.db_obj = &queue->db_obj;
769 	db_info.doorbell_offset = args->in.doorbell_offset;
770 
771 	queue->userq_mgr = uq_mgr;
772 
773 	/* Validate the userq virtual addresses. */
774 	r = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
775 	if (r)
776 		goto free_queue;
777 
778 	if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) ||
779 	    amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
780 	    amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
781 		r = -EINVAL;
782 		amdgpu_bo_unreserve(fpriv->vm.root.bo);
783 		goto clean_mapping;
784 	}
785 	amdgpu_bo_unreserve(fpriv->vm.root.bo);
786 
787 	/* Convert relative doorbell offset into absolute doorbell index */
788 	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
789 	if (index == (uint64_t)-EINVAL) {
790 		drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
791 		r = -EINVAL;
792 		goto clean_mapping;
793 	}
794 
795 	queue->doorbell_index = index;
796 	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
797 	r = amdgpu_userq_fence_driver_alloc(adev, &queue->fence_drv);
798 	if (r) {
799 		drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
800 		goto clean_mapping;
801 	}
802 
803 	r = uq_funcs->mqd_create(queue, &args->in);
804 	if (r) {
805 		drm_file_err(uq_mgr->file, "Failed to create Queue\n");
806 		goto clean_fence_driver;
807 	}
808 
809 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
810 
811 	/* don't map the queue if scheduling is halted */
812 	if (adev->userq_halt_for_enforce_isolation &&
813 	    ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
814 	     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
815 		skip_map_queue = true;
816 	else
817 		skip_map_queue = false;
818 	if (!skip_map_queue) {
819 		r = amdgpu_userq_map_helper(queue);
820 		if (r) {
821 			drm_file_err(uq_mgr->file, "Failed to map Queue\n");
822 			goto clean_mqd;
823 		}
824 	}
825 
826 	/* drop this refcount during queue destroy */
827 	kref_init(&queue->refcount);
828 
829 	/* Wait for mode-1 reset to complete */
830 	down_read(&adev->reset_domain->sem);
831 
832 	r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
833 		     XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
834 	if (r) {
835 		if (!skip_map_queue)
836 			amdgpu_userq_unmap_helper(queue);
837 		r = -ENOMEM;
838 		goto clean_reset_domain;
839 	}
840 
841 	r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
842 	if (r) {
843 		xa_erase(&uq_mgr->userq_xa, qid);
844 		if (!skip_map_queue)
845 			amdgpu_userq_unmap_helper(queue);
846 		goto clean_reset_domain;
847 	}
848 	up_read(&adev->reset_domain->sem);
849 
850 	amdgpu_debugfs_userq_init(filp, queue, qid);
851 	amdgpu_userq_init_hang_detect_work(queue);
852 
853 	args->out.queue_id = qid;
854 	atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
855 	mutex_unlock(&uq_mgr->userq_mutex);
856 	return 0;
857 
858 clean_reset_domain:
859 	up_read(&adev->reset_domain->sem);
860 clean_mqd:
861 	mutex_unlock(&uq_mgr->userq_mutex);
862 	uq_funcs->mqd_destroy(queue);
863 clean_fence_driver:
864 	amdgpu_userq_fence_driver_free(queue);
865 clean_mapping:
866 	amdgpu_bo_reserve(fpriv->vm.root.bo, true);
867 	amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
868 	amdgpu_bo_unreserve(fpriv->vm.root.bo);
869 free_queue:
870 	kfree(queue);
871 err_pm_runtime:
872 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
873 	return r;
874 }
875 
876 static int amdgpu_userq_input_args_validate(struct drm_device *dev,
877 					union drm_amdgpu_userq *args,
878 					struct drm_file *filp)
879 {
880 	struct amdgpu_device *adev = drm_to_adev(dev);
881 
882 	switch (args->in.op) {
883 	case AMDGPU_USERQ_OP_CREATE:
884 		if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
885 				       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
886 			return -EINVAL;
887 		/* Usermode queues are only supported for GFX, Compute and SDMA IPs for now */
888 		if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
889 		    args->in.ip_type != AMDGPU_HW_IP_DMA &&
890 		    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
891 			drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
892 				     args->in.ip_type);
893 			return -EINVAL;
894 		}
895 
896 		if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
897 		    (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
898 		    (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
899 		    !amdgpu_is_tmz(adev)) {
900 			drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
901 			return -EINVAL;
902 		}
903 
904 		if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
905 		    args->in.queue_va == 0 ||
906 		    args->in.queue_size == 0) {
907 			drm_file_err(filp, "invalid userq queue va or size\n");
908 			return -EINVAL;
909 		}
910 
911 		if (!is_power_of_2(args->in.queue_size)) {
912 			drm_file_err(filp, "Queue size must be a power of 2\n");
913 			return -EINVAL;
914 		}
915 
916 		if (args->in.queue_size < AMDGPU_GPU_PAGE_SIZE) {
917 			drm_file_err(filp, "Queue size smaller than AMDGPU_GPU_PAGE_SIZE\n");
918 			return -EINVAL;
919 		}
920 
921 		if (!args->in.wptr_va || !args->in.rptr_va) {
922 			drm_file_err(filp, "invalid userq rptr or wptr va\n");
923 			return -EINVAL;
924 		}
925 		break;
926 	case AMDGPU_USERQ_OP_FREE:
927 		if (args->in.ip_type ||
928 		    args->in.doorbell_handle ||
929 		    args->in.doorbell_offset ||
930 		    args->in.flags ||
931 		    args->in.queue_va ||
932 		    args->in.queue_size ||
933 		    args->in.rptr_va ||
934 		    args->in.wptr_va ||
935 		    args->in.mqd ||
936 		    args->in.mqd_size)
937 			return -EINVAL;
938 		break;
939 	default:
940 		return -EINVAL;
941 	}
942 
943 	return 0;
944 }
945 
946 bool amdgpu_userq_enabled(struct drm_device *dev)
947 {
948 	struct amdgpu_device *adev = drm_to_adev(dev);
949 	int i;
950 
951 	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
952 		if (adev->userq_funcs[i])
953 			return true;
954 	}
955 
956 	return false;
957 }
958 
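/*
 * DRM ioctl entry point for usermode queues. After validating the input
 * arguments this either creates a new queue or, for AMDGPU_USERQ_OP_FREE,
 * removes the queue from the process xarray and drops its reference.
 */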
959 int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
960 		       struct drm_file *filp)
961 {
962 	union drm_amdgpu_userq *args = data;
963 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
964 	struct amdgpu_usermode_queue *queue;
965 	int r = 0;
966 
967 	if (!amdgpu_userq_enabled(dev))
968 		return -ENOTSUPP;
969 
970 	if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
971 		return -EINVAL;
972 
973 	switch (args->in.op) {
974 	case AMDGPU_USERQ_OP_CREATE:
975 		r = amdgpu_userq_create(filp, args);
976 		if (r)
977 			drm_file_err(filp, "Failed to create usermode queue\n");
978 		break;
979 
980 	case AMDGPU_USERQ_OP_FREE: {
981 		xa_lock(&fpriv->userq_mgr.userq_xa);
982 		queue = __xa_erase(&fpriv->userq_mgr.userq_xa, args->in.queue_id);
983 		xa_unlock(&fpriv->userq_mgr.userq_xa);
984 		if (!queue)
985 			return -ENOENT;
986 
987 		amdgpu_userq_put(queue);
988 		break;
989 	}
990 
991 	default:
992 		drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
993 		return -EINVAL;
994 	}
995 
996 	return r;
997 }
998 
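/*
 * Re-map all queues of this process after their BOs have been validated.
 * Queues whose recorded VAs are no longer mapped are flagged
 * AMDGPU_USERQ_STATE_INVALID_VA and skipped.
 */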
999 static int
1000 amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
1001 {
1002 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1003 	struct amdgpu_vm *vm = &fpriv->vm;
1004 	struct amdgpu_usermode_queue *queue;
1005 	unsigned long queue_id;
1006 	int ret = 0, r;
1007 
1008 
1009 	if (amdgpu_bo_reserve(vm->root.bo, false))
1010 		return -EINVAL;
1011 
1012 	mutex_lock(&uq_mgr->userq_mutex);
1013 	/* Resume all the queues for this process */
1014 	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1015 
1016 		if (!amdgpu_userq_buffer_vas_mapped(queue)) {
1017 			drm_file_err(uq_mgr->file,
1018 				     "trying to restore a queue without a va mapping\n");
1019 			queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
1020 			continue;
1021 		}
1022 
1023 		r = amdgpu_userq_restore_helper(queue);
1024 		if (r)
1025 			ret = r;
1026 
1027 	}
1028 	mutex_unlock(&uq_mgr->userq_mutex);
1029 	amdgpu_bo_unreserve(vm->root.bo);
1030 
1031 	if (ret)
1032 		drm_file_err(uq_mgr->file,
1033 			     "Failed to map all the queues, restore failed ret=%d\n", ret);
1034 	return ret;
1035 }
1036 
1037 static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
1038 {
1039 	struct ttm_operation_ctx ctx = { false, false };
1040 
1041 	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1042 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1043 }
1044 
1045 /* Handle all BOs on the invalidated list, validate them and update the PTs */
1046 static int
1047 amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
1048 			 struct amdgpu_vm *vm)
1049 {
1050 	struct ttm_operation_ctx ctx = { false, false };
1051 	struct amdgpu_bo_va *bo_va;
1052 	struct amdgpu_bo *bo;
1053 	int ret;
1054 
1055 	spin_lock(&vm->status_lock);
1056 	while (!list_empty(&vm->invalidated)) {
1057 		bo_va = list_first_entry(&vm->invalidated,
1058 					 struct amdgpu_bo_va,
1059 					 base.vm_status);
1060 		spin_unlock(&vm->status_lock);
1061 
1062 		bo = bo_va->base.bo;
1063 		ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
1064 		if (unlikely(ret))
1065 			return ret;
1066 
1067 		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1068 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1069 		if (ret)
1070 			return ret;
1071 
1072 		/* This moves the bo_va to the done list */
1073 		ret = amdgpu_vm_bo_update(adev, bo_va, false);
1074 		if (ret)
1075 			return ret;
1076 
1077 		spin_lock(&vm->status_lock);
1078 	}
1079 	spin_unlock(&vm->status_lock);
1080 
1081 	return 0;
1082 }
1083 
1084 /* Make sure the whole VM is ready to be used */
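/*
 * Validation runs under a drm_exec loop and is restarted whenever new
 * userptr (HMM) ranges are discovered: the locks are dropped, user pages are
 * re-acquired for the new ranges, and the whole sequence retries until no
 * further ranges show up. Page table updates are then flushed and the
 * eviction fence is rearmed.
 */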
1085 static int
1086 amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
1087 {
1088 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1089 	bool invalidated = false, new_addition = false;
1090 	struct ttm_operation_ctx ctx = { true, false };
1091 	struct amdgpu_device *adev = uq_mgr->adev;
1092 	struct amdgpu_hmm_range *range;
1093 	struct amdgpu_vm *vm = &fpriv->vm;
1094 	unsigned long key, tmp_key;
1095 	struct amdgpu_bo_va *bo_va;
1096 	struct amdgpu_bo *bo;
1097 	struct drm_exec exec;
1098 	struct xarray xa;
1099 	int ret;
1100 
1101 	xa_init(&xa);
1102 
1103 retry_lock:
1104 	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
1105 	drm_exec_until_all_locked(&exec) {
1106 		ret = amdgpu_vm_lock_pd(vm, &exec, 1);
1107 		drm_exec_retry_on_contention(&exec);
1108 		if (unlikely(ret))
1109 			goto unlock_all;
1110 
1111 		ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
1112 		drm_exec_retry_on_contention(&exec);
1113 		if (unlikely(ret))
1114 			goto unlock_all;
1115 
1116 		/* This validates PDs, PTs and per VM BOs */
1117 		ret = amdgpu_vm_validate(adev, vm, NULL,
1118 					 amdgpu_userq_validate_vm,
1119 					 NULL);
1120 		if (unlikely(ret))
1121 			goto unlock_all;
1122 
1123 		/* This locks and validates the remaining evicted BOs */
1124 		ret = amdgpu_userq_bo_validate(adev, &exec, vm);
1125 		drm_exec_retry_on_contention(&exec);
1126 		if (unlikely(ret))
1127 			goto unlock_all;
1128 	}
1129 
1130 	if (invalidated) {
1131 		xa_for_each(&xa, tmp_key, range) {
1132 			bo = range->bo;
1133 			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1134 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1135 			if (ret)
1136 				goto unlock_all;
1137 
1138 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
1139 
1140 			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
1141 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1142 			if (ret)
1143 				goto unlock_all;
1144 		}
1145 		invalidated = false;
1146 	}
1147 
1148 	ret = amdgpu_vm_handle_moved(adev, vm, NULL);
1149 	if (ret)
1150 		goto unlock_all;
1151 
1152 	key = 0;
1153 	/* Validate User Ptr BOs */
1154 	list_for_each_entry(bo_va, &vm->done, base.vm_status) {
1155 		bo = bo_va->base.bo;
1156 		if (!bo)
1157 			continue;
1158 
1159 		if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
1160 			continue;
1161 
1162 		range = xa_load(&xa, key);
1163 		if (range && range->bo != bo) {
1164 			xa_erase(&xa, key);
1165 			amdgpu_hmm_range_free(range);
1166 			range = NULL;
1167 		}
1168 
1169 		if (!range) {
1170 			range = amdgpu_hmm_range_alloc(bo);
1171 			if (!range) {
1172 				ret = -ENOMEM;
1173 				goto unlock_all;
1174 			}
1175 
1176 			xa_store(&xa, key, range, GFP_KERNEL);
1177 			new_addition = true;
1178 		}
1179 		key++;
1180 	}
1181 
1182 	if (new_addition) {
1183 		drm_exec_fini(&exec);
1184 		xa_for_each(&xa, tmp_key, range) {
1185 			if (!range)
1186 				continue;
1187 			bo = range->bo;
1188 			ret = amdgpu_ttm_tt_get_user_pages(bo, range);
1189 			if (ret)
1190 				goto unlock_all;
1191 		}
1192 
1193 		invalidated = true;
1194 		new_addition = false;
1195 		goto retry_lock;
1196 	}
1197 
1198 	ret = amdgpu_vm_update_pdes(adev, vm, false);
1199 	if (ret)
1200 		goto unlock_all;
1201 
1202 	/*
1203 	 * We need to wait for all VM updates to finish before restarting the
1204 	 * queues. Using the done list like that is now ok since everything is
1205 	 * locked in place.
1206 	 */
1207 	list_for_each_entry(bo_va, &vm->done, base.vm_status)
1208 		dma_fence_wait(bo_va->last_pt_update, false);
1209 	dma_fence_wait(vm->last_update, false);
1210 
1211 	ret = amdgpu_evf_mgr_rearm(&fpriv->evf_mgr, &exec);
1212 	if (ret)
1213 		drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
1214 
1215 unlock_all:
1216 	drm_exec_fini(&exec);
1217 	xa_for_each(&xa, tmp_key, range) {
1218 		if (!range)
1219 			continue;
1220 		bo = range->bo;
1221 		amdgpu_hmm_range_free(range);
1222 	}
1223 	xa_destroy(&xa);
1224 	return ret;
1225 }
1226 
1227 static void amdgpu_userq_restore_worker(struct work_struct *work)
1228 {
1229 	struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
1230 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1231 	struct dma_fence *ev_fence;
1232 	int ret;
1233 
1234 	ev_fence = amdgpu_evf_mgr_get_fence(&fpriv->evf_mgr);
1235 	if (!dma_fence_is_signaled(ev_fence))
1236 		goto put_fence;
1237 
1238 	ret = amdgpu_userq_vm_validate(uq_mgr);
1239 	if (ret) {
1240 		drm_file_err(uq_mgr->file, "Failed to validate BOs to restore ret=%d\n", ret);
1241 		goto put_fence;
1242 	}
1243 
1244 	amdgpu_userq_restore_all(uq_mgr);
1245 
1246 put_fence:
1247 	dma_fence_put(ev_fence);
1248 }
1249 
1250 static int
1251 amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
1252 {
1253 	struct amdgpu_usermode_queue *queue;
1254 	unsigned long queue_id;
1255 	int ret = 0, r;
1256 
1257 	amdgpu_userq_detect_and_reset_queues(uq_mgr);
1258 	/* Try to unmap all the queues in this process ctx */
1259 	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1260 		r = amdgpu_userq_preempt_helper(queue);
1261 		if (r)
1262 			ret = r;
1263 	}
1264 
1265 	if (ret)
1266 		drm_file_err(uq_mgr->file,
1267 			     "Couldn't unmap all the queues, eviction failed ret=%d\n", ret);
1268 	return ret;
1269 }
1270 
1271 void amdgpu_userq_reset_work(struct work_struct *work)
1272 {
1273 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
1274 						  userq_reset_work);
1275 	struct amdgpu_reset_context reset_context;
1276 
1277 	memset(&reset_context, 0, sizeof(reset_context));
1278 
1279 	reset_context.method = AMD_RESET_METHOD_NONE;
1280 	reset_context.reset_req_dev = adev;
1281 	reset_context.src = AMDGPU_RESET_SRC_USERQ;
1282 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
1283 	/*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/
1284 
1285 	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
1286 }
1287 
1288 static void
1289 amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
1290 {
1291 	struct amdgpu_usermode_queue *queue;
1292 	unsigned long queue_id;
1293 
1294 	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1295 		struct dma_fence *f = queue->last_fence;
1296 
1297 		if (!f)
1298 			continue;
1299 
1300 		dma_fence_wait(f, false);
1301 	}
1302 }
1303 
1304 void
1305 amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr)
1306 {
1307 	/* Wait for any pending userqueue fence work to finish */
1308 	amdgpu_userq_wait_for_signal(uq_mgr);
1309 	amdgpu_userq_evict_all(uq_mgr);
1310 }
1311 
1312 int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
1313 			  struct amdgpu_device *adev)
1314 {
1315 	mutex_init(&userq_mgr->userq_mutex);
1316 	xa_init_flags(&userq_mgr->userq_xa, XA_FLAGS_ALLOC);
1317 	userq_mgr->adev = adev;
1318 	userq_mgr->file = file_priv;
1319 
1320 	INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
1321 	return 0;
1322 }
1323 
1324 void amdgpu_userq_mgr_cancel_resume(struct amdgpu_userq_mgr *userq_mgr)
1325 {
1326 	cancel_delayed_work_sync(&userq_mgr->resume_work);
1327 }
1328 
1329 void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
1330 {
1331 	struct amdgpu_usermode_queue *queue;
1332 	unsigned long queue_id = 0;
1333 
1334 	for (;;) {
1335 		xa_lock(&userq_mgr->userq_xa);
1336 		queue = xa_find(&userq_mgr->userq_xa, &queue_id, ULONG_MAX,
1337 				XA_PRESENT);
1338 		if (queue)
1339 			__xa_erase(&userq_mgr->userq_xa, queue_id);
1340 		xa_unlock(&userq_mgr->userq_xa);
1341 
1342 		if (!queue)
1343 			break;
1344 
1345 		amdgpu_userq_put(queue);
1346 	}
1347 
1348 	xa_destroy(&userq_mgr->userq_xa);
1349 	mutex_destroy(&userq_mgr->userq_mutex);
1350 }
1351 
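/*
 * Suspend path: unmap every usermode queue on the device (or just preempt
 * them for S0ix) after cancelling any pending resume work.
 */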
1352 int amdgpu_userq_suspend(struct amdgpu_device *adev)
1353 {
1354 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1355 	struct amdgpu_usermode_queue *queue;
1356 	struct amdgpu_userq_mgr *uqm;
1357 	unsigned long queue_id;
1358 	int r;
1359 
1360 	if (!ip_mask)
1361 		return 0;
1362 
1363 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1364 		uqm = queue->userq_mgr;
1365 		cancel_delayed_work_sync(&uqm->resume_work);
1366 		guard(mutex)(&uqm->userq_mutex);
1367 		amdgpu_userq_detect_and_reset_queues(uqm);
1368 		if (adev->in_s0ix)
1369 			r = amdgpu_userq_preempt_helper(queue);
1370 		else
1371 			r = amdgpu_userq_unmap_helper(queue);
1372 		if (r)
1373 			return r;
1374 	}
1375 	return 0;
1376 }
1377 
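/*
 * Resume path: map every usermode queue again (or restore the queues that
 * were preempted for S0ix).
 */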
1378 int amdgpu_userq_resume(struct amdgpu_device *adev)
1379 {
1380 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1381 	struct amdgpu_usermode_queue *queue;
1382 	struct amdgpu_userq_mgr *uqm;
1383 	unsigned long queue_id;
1384 	int r;
1385 
1386 	if (!ip_mask)
1387 		return 0;
1388 
1389 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1390 		uqm = queue->userq_mgr;
1391 		guard(mutex)(&uqm->userq_mutex);
1392 		if (adev->in_s0ix)
1393 			r = amdgpu_userq_restore_helper(queue);
1394 		else
1395 			r = amdgpu_userq_map_helper(queue);
1396 		if (r)
1397 			return r;
1398 	}
1399 
1400 	return 0;
1401 }
1402 
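/*
 * Halt usermode queue scheduling for enforce isolation: preempt all GFX and
 * compute queues on the given XCP and keep new GFX/compute queues from being
 * mapped until the matching start_sched call below.
 */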
1403 int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
1404 						  u32 idx)
1405 {
1406 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1407 	struct amdgpu_usermode_queue *queue;
1408 	struct amdgpu_userq_mgr *uqm;
1409 	unsigned long queue_id;
1410 	int ret = 0, r;
1411 
1412 	/* only need to stop gfx/compute */
1413 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
1414 		return 0;
1415 
1416 	if (adev->userq_halt_for_enforce_isolation)
1417 		dev_warn(adev->dev, "userq scheduling already stopped!\n");
1418 	adev->userq_halt_for_enforce_isolation = true;
1419 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1420 		uqm = queue->userq_mgr;
1421 		cancel_delayed_work_sync(&uqm->resume_work);
1422 		mutex_lock(&uqm->userq_mutex);
1423 		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1424 		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1425 		    (queue->xcp_id == idx)) {
1426 			amdgpu_userq_detect_and_reset_queues(uqm);
1427 			r = amdgpu_userq_preempt_helper(queue);
1428 			if (r)
1429 				ret = r;
1430 		}
1431 		mutex_unlock(&uqm->userq_mutex);
1432 	}
1433 
1434 	return ret;
1435 }
1436 
1437 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
1438 						   u32 idx)
1439 {
1440 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1441 	struct amdgpu_usermode_queue *queue;
1442 	struct amdgpu_userq_mgr *uqm;
1443 	unsigned long queue_id;
1444 	int ret = 0, r;
1445 
1446 	/* only need to start gfx/compute */
1447 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
1448 		return 0;
1449 
1450 	if (!adev->userq_halt_for_enforce_isolation)
1451 		dev_warn(adev->dev, "userq scheduling already started!\n");
1452 
1453 	adev->userq_halt_for_enforce_isolation = false;
1454 
1455 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1456 		uqm = queue->userq_mgr;
1457 		mutex_lock(&uqm->userq_mutex);
1458 		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1459 		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1460 		    (queue->xcp_id == idx)) {
1461 			r = amdgpu_userq_restore_helper(queue);
1462 			if (r)
1463 				ret = r;
1464 		}
1465 		mutex_unlock(&uqm->userq_mutex);
1466 	}
1467 
1468 	return ret;
1469 }
1470 
1471 void amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
1472 					struct amdgpu_bo_va_mapping *mapping,
1473 					uint64_t saddr)
1474 {
1475 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1476 	struct amdgpu_bo_va *bo_va = mapping->bo_va;
1477 	struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
1478 
1479 	if (!ip_mask)
1480 		return;
1481 
1482 	dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr);
1483 	/**
1484 	 * The userq VA mapping reservation should include the eviction fence,
1485 	 * if the eviction fence can't signal successfully during unmapping,
1486 	 * then driver will warn to flag this improper unmap of the userq VA.
1487 	 * Note: The eviction fence may be attached to different BOs, and this
1488 	 * unmap is only for one kind of userq VAs, so at this point suppose
1489 	 * the eviction fence is always unsignaled.
1490 	 */
1491 	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
1492 			      false, MAX_SCHEDULE_TIMEOUT);
1493 }
1494 
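/*
 * Called before a full GPU reset: unmap every mapped usermode queue, mark it
 * hung and force-complete its fences. amdgpu_userq_post_reset() maps the
 * hung queues again if VRAM contents survived the reset.
 */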
1495 void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
1496 {
1497 	const struct amdgpu_userq_funcs *userq_funcs;
1498 	struct amdgpu_usermode_queue *queue;
1499 	struct amdgpu_userq_mgr *uqm;
1500 	unsigned long queue_id;
1501 
1502 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1503 		uqm = queue->userq_mgr;
1504 		cancel_delayed_work_sync(&uqm->resume_work);
1505 		if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
1506 			amdgpu_userq_wait_for_last_fence(queue);
1507 			userq_funcs = adev->userq_funcs[queue->queue_type];
1508 			userq_funcs->unmap(queue);
1509 			/* just mark all queues as hung at this point.
1510 			 * if unmap succeeds, we could map again
1511 			 * in amdgpu_userq_post_reset() if vram is not lost
1512 			 */
1513 			queue->state = AMDGPU_USERQ_STATE_HUNG;
1514 			amdgpu_userq_fence_driver_force_completion(queue);
1515 		}
1516 	}
1517 }
1518 
1519 int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost)
1520 {
1521 	/* Queues marked AMDGPU_USERQ_STATE_HUNG in amdgpu_userq_pre_reset()
1522 	 * can be mapped again at this point and continue
1523 	 * if vram is not lost.
1524 	 */
1525 	struct amdgpu_usermode_queue *queue;
1526 	const struct amdgpu_userq_funcs *userq_funcs;
1527 	unsigned long queue_id;
1528 	int r = 0;
1529 
1530 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1531 		if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
1532 			userq_funcs = adev->userq_funcs[queue->queue_type];
1533 			/* Re-map queue */
1534 			r = userq_funcs->map(queue);
1535 			if (r) {
1536 				dev_err(adev->dev, "Failed to remap queue %lu\n", queue_id);
1537 				continue;
1538 			}
1539 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
1540 		}
1541 	}
1542 
1543 	return r;
1544 }
1545