xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c (revision 39d3389331abd712461f50249722f7ed9d815068)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <drm/drm_auth.h>
26 #include <drm/drm_exec.h>
27 #include <linux/pm_runtime.h>
28 #include <drm/drm_drv.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_reset.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_userq.h"
34 #include "amdgpu_hmm.h"
35 #include "amdgpu_userq_fence.h"
36 
37 u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
38 {
39 	int i;
40 	u32 userq_ip_mask = 0;
41 
42 	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
43 		if (adev->userq_funcs[i])
44 			userq_ip_mask |= (1 << i);
45 	}
46 
47 	return userq_ip_mask;
48 }
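/*
 * Usage note (a hedged sketch, not taken from this revision): the returned
 * mask has one bit per AMDGPU_HW_IP_* type, set whenever adev->userq_funcs[]
 * is populated for that IP.  A caller checking for GFX user queue support
 * would do roughly:
 *
 *	u32 mask = amdgpu_userq_get_supported_ip_mask(adev);
 *	bool has_gfx_userq = mask & (1 << AMDGPU_HW_IP_GFX);
 */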
49 
50 static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev,
51 				enum amdgpu_ring_type ring_type, int reset_type)
52 {
53 
54 	if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX)
55 		return false;
56 
57 	switch (ring_type) {
58 	case AMDGPU_RING_TYPE_GFX:
59 		if (adev->gfx.gfx_supported_reset & reset_type)
60 			return true;
61 		break;
62 	case AMDGPU_RING_TYPE_COMPUTE:
63 		if (adev->gfx.compute_supported_reset & reset_type)
64 			return true;
65 		break;
66 	case AMDGPU_RING_TYPE_SDMA:
67 		if (adev->sdma.supported_reset & reset_type)
68 			return true;
69 		break;
70 	case AMDGPU_RING_TYPE_VCN_DEC:
71 	case AMDGPU_RING_TYPE_VCN_ENC:
72 		if (adev->vcn.supported_reset & reset_type)
73 			return true;
74 		break;
75 	case AMDGPU_RING_TYPE_VCN_JPEG:
76 		if (adev->jpeg.supported_reset & reset_type)
77 			return true;
78 		break;
79 	default:
80 		break;
81 	}
82 	return false;
83 }
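/*
 * Illustrative use (a sketch mirroring the per-queue reset gating in
 * amdgpu_userq_detect_and_reset_queues() below):
 *
 *	if (amdgpu_userq_is_reset_type_supported(adev, ring_type,
 *						  AMDGPU_RESET_TYPE_PER_QUEUE))
 *		r = funcs->detect_and_reset(adev, ring_type);
 */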
84 
85 static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev)
86 {
87 	if (amdgpu_device_should_recover_gpu(adev)) {
88 		amdgpu_reset_domain_schedule(adev->reset_domain,
89 					     &adev->userq_reset_work);
90 		/* Wait for the reset job to complete */
91 		flush_work(&adev->userq_reset_work);
92 	}
93 }
94 
95 static int
96 amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr)
97 {
98 	struct amdgpu_device *adev = uq_mgr->adev;
99 	const int queue_types[] = {
100 		AMDGPU_RING_TYPE_COMPUTE,
101 		AMDGPU_RING_TYPE_GFX,
102 		AMDGPU_RING_TYPE_SDMA
103 	};
104 	const int num_queue_types = ARRAY_SIZE(queue_types);
105 	bool gpu_reset = false;
106 	int r = 0;
107 	int i;
108 
109 	/* Warn if the per-process userq mutex is not held */
110 	WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex));
111 
112 	if (unlikely(adev->debug_disable_gpu_ring_reset)) {
113 		dev_err(adev->dev, "userq reset disabled by debug mask\n");
114 		return 0;
115 	}
116 
117 	/*
118 	 * If GPU recovery feature is disabled system-wide,
119 	 * skip all reset detection logic
120 	 */
121 	if (!amdgpu_gpu_recovery)
122 		return 0;
123 
124 	/*
125 	 * Iterate through all queue types to detect and reset problematic queues
126 	 * Process each queue type in the defined order
127 	 */
128 	for (i = 0; i < num_queue_types; i++) {
129 		int ring_type = queue_types[i];
130 		const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type];
131 
132 		if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE))
133 			continue;
134 
135 		if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 &&
136 		    funcs && funcs->detect_and_reset) {
137 			r = funcs->detect_and_reset(adev, ring_type);
138 			if (r) {
139 				gpu_reset = true;
140 				break;
141 			}
142 		}
143 	}
144 
145 	if (gpu_reset)
146 		amdgpu_userq_gpu_reset(adev);
147 
148 	return r;
149 }
150 
151 static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
152 					   struct amdgpu_bo_va_mapping *va_map, u64 addr)
153 {
154 	struct amdgpu_userq_va_cursor *va_cursor;
156 
157 	va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
158 	if (!va_cursor)
159 		return -ENOMEM;
160 
161 	INIT_LIST_HEAD(&va_cursor->list);
162 	va_cursor->gpu_addr = addr;
163 	atomic_set(&va_map->bo_va->userq_va_mapped, 1);
164 	list_add(&va_cursor->list, &queue->userq_va_list);
165 
166 	return 0;
167 }
168 
169 int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
170 				   u64 addr, u64 expected_size)
171 {
172 	struct amdgpu_bo_va_mapping *va_map;
173 	struct amdgpu_vm *vm = queue->vm;
174 	u64 user_addr;
175 	u64 size;
176 	int r = 0;
177 
178 	user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
179 	size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
180 
181 	r = amdgpu_bo_reserve(vm->root.bo, false);
182 	if (r)
183 		return r;
184 
185 	va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
186 	if (!va_map) {
187 		r = -EINVAL;
188 		goto out_err;
189 	}
190 	/* Only validate that the userq buffer is resident within the VM mapping range */
191 	if (user_addr >= va_map->start &&
192 	    va_map->last - user_addr + 1 >= size) {
193 		amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
194 		amdgpu_bo_unreserve(vm->root.bo);
195 		return 0;
196 	}
197 
198 	r = -EINVAL;
199 out_err:
200 	amdgpu_bo_unreserve(vm->root.bo);
201 	return r;
202 }
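/*
 * Worked example (a sketch) of the check above: comparisons are done in GPU
 * pages (AMDGPU_GPU_PAGE_SHIFT).  With 4 KiB GPU pages, queue_va 0x7f0000 and
 * queue_size 0x2000 give user_addr = 0x7f0 and size = 2 pages; the looked-up
 * mapping must then satisfy user_addr >= va_map->start and
 * va_map->last - user_addr + 1 >= size, i.e. the whole ring fits inside the
 * GPUVM mapping.
 */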
203 
204 static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
205 {
206 	struct amdgpu_bo_va_mapping *mapping;
207 	bool r;
208 
209 	if (amdgpu_bo_reserve(vm->root.bo, false))
210 		return false;
211 
212 	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
213 	if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
214 		r = true;
215 	else
216 		r = false;
217 	amdgpu_bo_unreserve(vm->root.bo);
218 
219 	return r;
220 }
221 
222 static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
223 {
224 	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
225 	int r = 0;
226 
227 	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
228 		r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
229 		dev_dbg(queue->userq_mgr->adev->dev,
230 			"validate the userq mapping:%p va:%llx r:%d\n",
231 			queue, va_cursor->gpu_addr, r);
232 	}
233 
234 	if (r != 0)
235 		return true;
236 
237 	return false;
238 }
239 
240 static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
241 					    struct amdgpu_userq_va_cursor *va_cursor)
242 {
243 	atomic_set(&mapping->bo_va->userq_va_mapped, 0);
244 	list_del(&va_cursor->list);
245 	kfree(va_cursor);
246 }
247 
248 static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
249 						struct amdgpu_usermode_queue *queue)
250 {
251 	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
252 	struct amdgpu_bo_va_mapping *mapping;
253 	int r;
254 
255 	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
256 	if (r)
257 		return r;
258 
259 	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
260 		mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
261 		if (!mapping) {
262 			r = -EINVAL;
263 			goto err;
264 		}
265 		dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
266 			queue, va_cursor->gpu_addr);
267 		amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
268 	}
269 err:
270 	amdgpu_bo_unreserve(queue->vm->root.bo);
271 	return r;
272 }
273 
274 static int
275 amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr,
276 			  struct amdgpu_usermode_queue *queue)
277 {
278 	struct amdgpu_device *adev = uq_mgr->adev;
279 	const struct amdgpu_userq_funcs *userq_funcs =
280 		adev->userq_funcs[queue->queue_type];
281 	bool found_hung_queue = false;
282 	int r = 0;
283 
284 	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
285 		r = userq_funcs->preempt(uq_mgr, queue);
286 		if (r) {
287 			queue->state = AMDGPU_USERQ_STATE_HUNG;
288 			found_hung_queue = true;
289 		} else {
290 			queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
291 		}
292 	}
293 
294 	if (found_hung_queue)
295 		amdgpu_userq_detect_and_reset_queues(uq_mgr);
296 
297 	return r;
298 }
299 
300 static int
301 amdgpu_userq_restore_helper(struct amdgpu_userq_mgr *uq_mgr,
302 			struct amdgpu_usermode_queue *queue)
303 {
304 	struct amdgpu_device *adev = uq_mgr->adev;
305 	const struct amdgpu_userq_funcs *userq_funcs =
306 		adev->userq_funcs[queue->queue_type];
307 	int r = 0;
308 
309 	if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
310 		r = userq_funcs->restore(uq_mgr, queue);
311 		if (r) {
312 			queue->state = AMDGPU_USERQ_STATE_HUNG;
313 		} else {
314 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
315 		}
316 	}
317 
318 	return r;
319 }
320 
321 static int
322 amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
323 			  struct amdgpu_usermode_queue *queue)
324 {
325 	struct amdgpu_device *adev = uq_mgr->adev;
326 	const struct amdgpu_userq_funcs *userq_funcs =
327 		adev->userq_funcs[queue->queue_type];
328 	bool found_hung_queue = false;
329 	int r = 0;
330 
331 	if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
332 		(queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
333 		r = userq_funcs->unmap(uq_mgr, queue);
334 		if (r) {
335 			queue->state = AMDGPU_USERQ_STATE_HUNG;
336 			found_hung_queue = true;
337 		} else {
338 			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
339 		}
340 	}
341 
342 	if (found_hung_queue)
343 		amdgpu_userq_detect_and_reset_queues(uq_mgr);
344 
345 	return r;
346 }
347 
348 static int
349 amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
350 			struct amdgpu_usermode_queue *queue)
351 {
352 	struct amdgpu_device *adev = uq_mgr->adev;
353 	const struct amdgpu_userq_funcs *userq_funcs =
354 		adev->userq_funcs[queue->queue_type];
355 	int r = 0;
356 
357 	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
358 		r = userq_funcs->map(uq_mgr, queue);
359 		if (r) {
360 			queue->state = AMDGPU_USERQ_STATE_HUNG;
361 			amdgpu_userq_detect_and_reset_queues(uq_mgr);
362 		} else {
363 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
364 		}
365 	}
366 
367 	return r;
368 }
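/*
 * State machine summary (a rough sketch) for the four helpers above:
 *
 *	UNMAPPED --map--> MAPPED --preempt--> PREEMPTED --restore--> MAPPED
 *	MAPPED or PREEMPTED --unmap--> UNMAPPED
 *
 * Any failed transition parks the queue in AMDGPU_USERQ_STATE_HUNG and, for
 * preempt/unmap/map failures, kicks amdgpu_userq_detect_and_reset_queues().
 */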
369 
370 static int
371 amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
372 				 struct amdgpu_usermode_queue *queue)
373 {
374 	struct dma_fence *f = queue->last_fence;
375 	int ret = 0;
376 
377 	if (f && !dma_fence_is_signaled(f)) {
378 		ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT);
379 		if (ret <= 0) {
380 			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
381 				     f->context, f->seqno);
382 			queue->state = AMDGPU_USERQ_STATE_HUNG;
383 			return -ETIME;
384 		}
385 	}
386 
387 	return ret;
388 }
389 
390 static void
391 amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
392 		     struct amdgpu_usermode_queue *queue,
393 		     int queue_id)
394 {
395 	struct amdgpu_device *adev = uq_mgr->adev;
396 	const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
397 
398 	/* Wait for mode-1 reset to complete */
399 	down_read(&adev->reset_domain->sem);
400 
401 	/* Tear down the userq: VA list, MQD, fence driver and lookup entries */
402 	amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
403 	uq_funcs->mqd_destroy(uq_mgr, queue);
404 	amdgpu_userq_fence_driver_free(queue);
405 	/* Use interrupt-safe locking since IRQ handlers may access these XArrays */
406 	xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id);
407 	xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
408 	queue->userq_mgr = NULL;
409 	list_del(&queue->userq_va_list);
410 	kfree(queue);
411 
412 	up_read(&adev->reset_domain->sem);
413 }
414 
415 static struct amdgpu_usermode_queue *
416 amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
417 {
418 	return xa_load(&uq_mgr->userq_mgr_xa, qid);
419 }
420 
421 void
422 amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
423 			     struct amdgpu_eviction_fence_mgr *evf_mgr)
424 {
425 	struct amdgpu_eviction_fence *ev_fence;
426 
427 retry:
428 	/* Flush any pending resume work to create ev_fence */
429 	flush_delayed_work(&uq_mgr->resume_work);
430 
431 	mutex_lock(&uq_mgr->userq_mutex);
432 	spin_lock(&evf_mgr->ev_fence_lock);
433 	ev_fence = evf_mgr->ev_fence;
434 	spin_unlock(&evf_mgr->ev_fence_lock);
435 	if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
436 		mutex_unlock(&uq_mgr->userq_mutex);
437 		/*
438 		 * Looks like there was no pending resume work,
439 		 * add one now to create a valid eviction fence
440 		 */
441 		schedule_delayed_work(&uq_mgr->resume_work, 0);
442 		goto retry;
443 	}
444 }
445 
446 int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
447 			       struct amdgpu_userq_obj *userq_obj,
448 			       int size)
449 {
450 	struct amdgpu_device *adev = uq_mgr->adev;
451 	struct amdgpu_bo_param bp;
452 	int r;
453 
454 	memset(&bp, 0, sizeof(bp));
455 	bp.byte_align = PAGE_SIZE;
456 	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
457 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
458 		   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
459 	bp.type = ttm_bo_type_kernel;
460 	bp.size = size;
461 	bp.resv = NULL;
462 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
463 
464 	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
465 	if (r) {
466 		drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
467 		return r;
468 	}
469 
470 	r = amdgpu_bo_reserve(userq_obj->obj, true);
471 	if (r) {
472 		drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
473 		goto free_obj;
474 	}
475 
476 	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
477 	if (r) {
478 		drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
479 		goto unresv;
480 	}
481 
482 	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
483 	if (r) {
484 		drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
485 		goto unresv;
486 	}
487 
488 	userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
489 	amdgpu_bo_unreserve(userq_obj->obj);
490 	memset(userq_obj->cpu_ptr, 0, size);
491 	return 0;
492 
493 unresv:
494 	amdgpu_bo_unreserve(userq_obj->obj);
495 
496 free_obj:
497 	amdgpu_bo_unref(&userq_obj->obj);
498 	return r;
499 }
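/*
 * Usage sketch (the consumer steps are illustrative only): per-queue kernel
 * buffers such as MQDs are expected to be allocated with this helper and then
 * reached through both views it fills in:
 *
 *	struct amdgpu_userq_obj obj;
 *
 *	if (!amdgpu_userq_create_object(uq_mgr, &obj, PAGE_SIZE)) {
 *		memset(obj.cpu_ptr, 0, PAGE_SIZE);	// CPU view
 *		// ... program the HW with obj.gpu_addr (GART address) ...
 *		amdgpu_userq_destroy_object(uq_mgr, &obj);
 *	}
 */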
500 
501 void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
502 				 struct amdgpu_userq_obj *userq_obj)
503 {
504 	amdgpu_bo_kunmap(userq_obj->obj);
505 	amdgpu_bo_unref(&userq_obj->obj);
506 }
507 
508 uint64_t
509 amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
510 				struct amdgpu_db_info *db_info,
511 				struct drm_file *filp)
512 {
513 	uint64_t index;
514 	struct drm_gem_object *gobj;
515 	struct amdgpu_userq_obj *db_obj = db_info->db_obj;
516 	int r, db_size;
517 
518 	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
519 	if (gobj == NULL) {
520 		drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
521 		return -EINVAL;
522 	}
523 
524 	db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
525 	drm_gem_object_put(gobj);
526 
527 	r = amdgpu_bo_reserve(db_obj->obj, true);
528 	if (r) {
529 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
530 		goto unref_bo;
531 	}
532 
533 	/* Pin the BO before generating the index, unpin in queue destroy */
534 	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
535 	if (r) {
536 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
537 		goto unresv_bo;
538 	}
539 
540 	switch (db_info->queue_type) {
541 	case AMDGPU_HW_IP_GFX:
542 	case AMDGPU_HW_IP_COMPUTE:
543 	case AMDGPU_HW_IP_DMA:
544 		db_size = sizeof(u64);
545 		break;
546 	default:
547 		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
548 			     db_info->queue_type);
549 		r = -EINVAL;
550 		goto unpin_bo;
551 	}
552 
553 	index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
554 					     db_info->doorbell_offset, db_size);
555 	drm_dbg_driver(adev_to_drm(uq_mgr->adev),
556 		       "[Usermode queues] doorbell index=%lld\n", index);
557 	amdgpu_bo_unreserve(db_obj->obj);
558 	return index;
559 
560 unpin_bo:
561 	amdgpu_bo_unpin(db_obj->obj);
562 unresv_bo:
563 	amdgpu_bo_unreserve(db_obj->obj);
564 unref_bo:
565 	amdgpu_bo_unref(&db_obj->obj);
566 	return r;
567 }
568 
569 static int
570 amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
571 {
572 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
573 	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
574 	struct amdgpu_device *adev = uq_mgr->adev;
575 	struct amdgpu_usermode_queue *queue;
576 	int r = 0;
577 
578 	cancel_delayed_work_sync(&uq_mgr->resume_work);
579 	mutex_lock(&uq_mgr->userq_mutex);
580 
581 	queue = amdgpu_userq_find(uq_mgr, queue_id);
582 	if (!queue) {
583 		drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n");
584 		mutex_unlock(&uq_mgr->userq_mutex);
585 		return -EINVAL;
586 	}
587 	amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
588 	r = amdgpu_bo_reserve(queue->db_obj.obj, true);
589 	if (!r) {
590 		amdgpu_bo_unpin(queue->db_obj.obj);
591 		amdgpu_bo_unreserve(queue->db_obj.obj);
592 	}
593 	amdgpu_bo_unref(&queue->db_obj.obj);
594 	atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
595 #if defined(CONFIG_DEBUG_FS)
596 	debugfs_remove_recursive(queue->debugfs_queue);
597 #endif
598 	amdgpu_userq_detect_and_reset_queues(uq_mgr);
599 	r = amdgpu_userq_unmap_helper(uq_mgr, queue);
600 	/* TODO: a reset is required on a userq HW unmap error */
601 	if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
602 		drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a userq that is still mapped in HW\n");
603 		queue->state = AMDGPU_USERQ_STATE_HUNG;
604 	}
605 	amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
606 	mutex_unlock(&uq_mgr->userq_mutex);
607 
608 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
609 
610 	return r;
611 }
612 
613 static int amdgpu_userq_priority_permit(struct drm_file *filp,
614 					int priority)
615 {
616 	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
617 		return 0;
618 
619 	if (capable(CAP_SYS_NICE))
620 		return 0;
621 
622 	if (drm_is_current_master(filp))
623 		return 0;
624 
625 	return -EACCES;
626 }
627 
628 #if defined(CONFIG_DEBUG_FS)
629 static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
630 {
631 	struct amdgpu_usermode_queue *queue = m->private;
632 	struct amdgpu_bo *bo;
633 	int r;
634 
635 	if (!queue || !queue->mqd.obj)
636 		return -EINVAL;
637 
638 	bo = amdgpu_bo_ref(queue->mqd.obj);
639 	r = amdgpu_bo_reserve(bo, true);
640 	if (r) {
641 		amdgpu_bo_unref(&bo);
642 		return -EINVAL;
643 	}
644 
645 	seq_printf(m, "queue_type: %d\n", queue->queue_type);
646 	seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
647 
648 	amdgpu_bo_unreserve(bo);
649 	amdgpu_bo_unref(&bo);
650 
651 	return 0;
652 }
653 
654 static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
655 {
656 	return single_open(file, amdgpu_mqd_info_read, inode->i_private);
657 }
658 
659 static const struct file_operations amdgpu_mqd_info_fops = {
660 	.owner = THIS_MODULE,
661 	.open = amdgpu_mqd_info_open,
662 	.read = seq_read,
663 	.llseek = seq_lseek,
664 	.release = single_release,
665 };
666 #endif
667 
668 static int
669 amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
670 {
671 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
672 	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
673 	struct amdgpu_device *adev = uq_mgr->adev;
674 	const struct amdgpu_userq_funcs *uq_funcs;
675 	struct amdgpu_usermode_queue *queue;
676 	struct amdgpu_db_info db_info;
677 	char *queue_name;
678 	bool skip_map_queue;
679 	u32 qid;
680 	uint64_t index;
681 	int r = 0;
682 	int priority =
683 		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
684 		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
685 
686 	r = amdgpu_userq_priority_permit(filp, priority);
687 	if (r)
688 		return r;
689 
690 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
691 	if (r < 0) {
692 		drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
693 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
694 		return r;
695 	}
696 
697 	/*
698 	 * There could be a situation that we are creating a new queue while
699 	 * the other queues under this UQ_mgr are suspended. So if there is any
700 	 * resume work pending, wait for it to get done.
701 	 *
702 	 * This will also make sure we have a valid eviction fence ready to be used.
703 	 */
704 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
705 
706 	uq_funcs = adev->userq_funcs[args->in.ip_type];
707 	if (!uq_funcs) {
708 		drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
709 			     args->in.ip_type);
710 		r = -EINVAL;
711 		goto unlock;
712 	}
713 
714 	queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
715 	if (!queue) {
716 		drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
717 		r = -ENOMEM;
718 		goto unlock;
719 	}
720 
721 	INIT_LIST_HEAD(&queue->userq_va_list);
722 	queue->doorbell_handle = args->in.doorbell_handle;
723 	queue->queue_type = args->in.ip_type;
724 	queue->vm = &fpriv->vm;
725 	queue->priority = priority;
726 
727 	db_info.queue_type = queue->queue_type;
728 	db_info.doorbell_handle = queue->doorbell_handle;
729 	db_info.db_obj = &queue->db_obj;
730 	db_info.doorbell_offset = args->in.doorbell_offset;
731 
732 	/* Validate the userq virtual addresses. */
733 	if (amdgpu_userq_input_va_validate(queue, args->in.queue_va, args->in.queue_size) ||
734 	    amdgpu_userq_input_va_validate(queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
735 	    amdgpu_userq_input_va_validate(queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
736 		r = -EINVAL;
737 		kfree(queue);
738 		goto unlock;
739 	}
740 
741 	/* Convert relative doorbell offset into absolute doorbell index */
742 	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
743 	if (index == (uint64_t)-EINVAL) {
744 		drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
745 		kfree(queue);
746 		r = -EINVAL;
747 		goto unlock;
748 	}
749 
750 	queue->doorbell_index = index;
751 	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
752 	r = amdgpu_userq_fence_driver_alloc(adev, queue);
753 	if (r) {
754 		drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
755 		goto unlock;
756 	}
757 
758 	r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
759 	if (r) {
760 		drm_file_err(uq_mgr->file, "Failed to create Queue\n");
761 		amdgpu_userq_fence_driver_free(queue);
762 		kfree(queue);
763 		goto unlock;
764 	}
765 
766 	/* Wait for mode-1 reset to complete */
767 	down_read(&adev->reset_domain->sem);
768 	r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
769 	if (r) {
770 		kfree(queue);
771 		up_read(&adev->reset_domain->sem);
772 		goto unlock;
773 	}
774 
775 	r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
776 	if (r) {
777 		drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
778 		amdgpu_userq_fence_driver_free(queue);
779 		uq_funcs->mqd_destroy(uq_mgr, queue);
780 		kfree(queue);
781 		r = -ENOMEM;
782 		up_read(&adev->reset_domain->sem);
783 		goto unlock;
784 	}
785 	up_read(&adev->reset_domain->sem);
786 	queue->userq_mgr = uq_mgr;
787 
788 	/* don't map the queue if scheduling is halted */
789 	if (adev->userq_halt_for_enforce_isolation &&
790 	    ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
791 	     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
792 		skip_map_queue = true;
793 	else
794 		skip_map_queue = false;
795 	if (!skip_map_queue) {
796 		r = amdgpu_userq_map_helper(uq_mgr, queue);
797 		if (r) {
798 			drm_file_err(uq_mgr->file, "Failed to map Queue\n");
799 			xa_erase(&uq_mgr->userq_mgr_xa, qid);
800 			amdgpu_userq_fence_driver_free(queue);
801 			uq_funcs->mqd_destroy(uq_mgr, queue);
802 			kfree(queue);
803 			goto unlock;
804 		}
805 	}
806 
807 	queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
808 	if (!queue_name) {
809 		r = -ENOMEM;
810 		goto unlock;
811 	}
812 
813 #if defined(CONFIG_DEBUG_FS)
814 	/* Queue dentry per client to hold MQD information */
815 	queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
816 	debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
817 #endif
818 	kfree(queue_name);
819 
820 	args->out.queue_id = qid;
821 	atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
822 
823 unlock:
824 	mutex_unlock(&uq_mgr->userq_mutex);
825 
826 	return r;
827 }
828 
829 static int amdgpu_userq_input_args_validate(struct drm_device *dev,
830 					union drm_amdgpu_userq *args,
831 					struct drm_file *filp)
832 {
833 	struct amdgpu_device *adev = drm_to_adev(dev);
834 
835 	switch (args->in.op) {
836 	case AMDGPU_USERQ_OP_CREATE:
837 		if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
838 				       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
839 			return -EINVAL;
840 		/* Usermode queues are only supported for GFX, Compute and SDMA IPs as of now */
841 		if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
842 		    args->in.ip_type != AMDGPU_HW_IP_DMA &&
843 		    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
844 			drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
845 				     args->in.ip_type);
846 			return -EINVAL;
847 		}
848 
849 		if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
850 		    (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
851 		    (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
852 		    !amdgpu_is_tmz(adev)) {
853 			drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
854 			return -EINVAL;
855 		}
856 
857 		if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
858 		    args->in.queue_va == 0 ||
859 		    args->in.queue_size == 0) {
860 			drm_file_err(filp, "invalid userq queue va or size\n");
861 			return -EINVAL;
862 		}
863 		if (!args->in.wptr_va || !args->in.rptr_va) {
864 			drm_file_err(filp, "invalid userq queue rptr or wptr\n");
865 			return -EINVAL;
866 		}
867 		break;
868 	case AMDGPU_USERQ_OP_FREE:
869 		if (args->in.ip_type ||
870 		    args->in.doorbell_handle ||
871 		    args->in.doorbell_offset ||
872 		    args->in.flags ||
873 		    args->in.queue_va ||
874 		    args->in.queue_size ||
875 		    args->in.rptr_va ||
876 		    args->in.wptr_va ||
877 		    args->in.mqd ||
878 		    args->in.mqd_size)
879 			return -EINVAL;
880 		break;
881 	default:
882 		return -EINVAL;
883 	}
884 
885 	return 0;
886 }
887 
888 bool amdgpu_userq_enabled(struct drm_device *dev)
889 {
890 	struct amdgpu_device *adev = drm_to_adev(dev);
891 	int i;
892 
893 	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
894 		if (adev->userq_funcs[i])
895 			return true;
896 	}
897 
898 	return false;
899 }
900 
901 int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
902 		       struct drm_file *filp)
903 {
904 	union drm_amdgpu_userq *args = data;
905 	int r;
906 
907 	if (!amdgpu_userq_enabled(dev))
908 		return -ENOTSUPP;
909 
910 	if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
911 		return -EINVAL;
912 
913 	switch (args->in.op) {
914 	case AMDGPU_USERQ_OP_CREATE:
915 		r = amdgpu_userq_create(filp, args);
916 		if (r)
917 			drm_file_err(filp, "Failed to create usermode queue\n");
918 		break;
919 
920 	case AMDGPU_USERQ_OP_FREE:
921 		r = amdgpu_userq_destroy(filp, args->in.queue_id);
922 		if (r)
923 			drm_file_err(filp, "Failed to destroy usermode queue\n");
924 		break;
925 
926 	default:
927 		drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
928 		return -EINVAL;
929 	}
930 
931 	return r;
932 }
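/*
 * Userspace-side sketch (field names follow the args->in/out usage above;
 * the ioctl wrapper and exact UAPI layout live in amdgpu_drm.h/libdrm and
 * are assumptions here, not reproduced from this file):
 *
 *	union drm_amdgpu_userq args = {};
 *
 *	args.in.op = AMDGPU_USERQ_OP_CREATE;
 *	args.in.ip_type = AMDGPU_HW_IP_GFX;
 *	args.in.doorbell_handle = doorbell_gem_handle;
 *	args.in.doorbell_offset = 0;
 *	args.in.queue_va = ring_gpu_va;		// must already be GPUVM mapped
 *	args.in.queue_size = ring_size;
 *	args.in.rptr_va = rptr_gpu_va;
 *	args.in.wptr_va = wptr_gpu_va;
 *	// submit via the AMDGPU userq ioctl; on success, args.out.queue_id
 *	// identifies the queue for a later AMDGPU_USERQ_OP_FREE.
 */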
933 
934 static int
935 amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
936 {
937 	struct amdgpu_usermode_queue *queue;
938 	unsigned long queue_id;
939 	int ret = 0, r;
940 
941 	/* Resume all the queues for this process */
942 	xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
943 
944 		if (!amdgpu_userq_buffer_vas_mapped(queue)) {
945 			drm_file_err(uq_mgr->file,
946 				     "trying to restore a queue without a VA mapping\n");
947 			queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
948 			continue;
949 		}
950 
951 		r = amdgpu_userq_restore_helper(uq_mgr, queue);
952 		if (r)
953 			ret = r;
954 	}
955 
956 	if (ret)
957 		drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
958 	return ret;
959 }
960 
961 static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
962 {
963 	struct ttm_operation_ctx ctx = { false, false };
964 
965 	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
966 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
967 }
968 
969 /* Handle all BOs on the invalidated list, validate them and update the PTs */
970 static int
971 amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
972 			 struct amdgpu_vm *vm)
973 {
974 	struct ttm_operation_ctx ctx = { false, false };
975 	struct amdgpu_bo_va *bo_va;
976 	struct amdgpu_bo *bo;
977 	int ret;
978 
979 	spin_lock(&vm->status_lock);
980 	while (!list_empty(&vm->invalidated)) {
981 		bo_va = list_first_entry(&vm->invalidated,
982 					 struct amdgpu_bo_va,
983 					 base.vm_status);
984 		spin_unlock(&vm->status_lock);
985 
986 		bo = bo_va->base.bo;
987 		ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
988 		if (unlikely(ret))
989 			return ret;
990 
991 		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
992 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
993 		if (ret)
994 			return ret;
995 
996 		/* This moves the bo_va to the done list */
997 		ret = amdgpu_vm_bo_update(adev, bo_va, false);
998 		if (ret)
999 			return ret;
1000 
1001 		spin_lock(&vm->status_lock);
1002 	}
1003 	spin_unlock(&vm->status_lock);
1004 
1005 	return 0;
1006 }
1007 
1008 /* Make sure the whole VM is ready to be used */
1009 static int
1010 amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
1011 {
1012 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1013 	bool invalidated = false, new_addition = false;
1014 	struct ttm_operation_ctx ctx = { true, false };
1015 	struct amdgpu_device *adev = uq_mgr->adev;
1016 	struct amdgpu_hmm_range *range;
1017 	struct amdgpu_vm *vm = &fpriv->vm;
1018 	unsigned long key, tmp_key;
1019 	struct amdgpu_bo_va *bo_va;
1020 	struct amdgpu_bo *bo;
1021 	struct drm_exec exec;
1022 	struct xarray xa;
1023 	int ret;
1024 
1025 	xa_init(&xa);
1026 
1027 retry_lock:
1028 	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
1029 	drm_exec_until_all_locked(&exec) {
1030 		ret = amdgpu_vm_lock_pd(vm, &exec, 1);
1031 		drm_exec_retry_on_contention(&exec);
1032 		if (unlikely(ret))
1033 			goto unlock_all;
1034 
1035 		ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
1036 		drm_exec_retry_on_contention(&exec);
1037 		if (unlikely(ret))
1038 			goto unlock_all;
1039 
1040 		/* This validates PDs, PTs and per VM BOs */
1041 		ret = amdgpu_vm_validate(adev, vm, NULL,
1042 					 amdgpu_userq_validate_vm,
1043 					 NULL);
1044 		if (unlikely(ret))
1045 			goto unlock_all;
1046 
1047 		/* This locks and validates the remaining evicted BOs */
1048 		ret = amdgpu_userq_bo_validate(adev, &exec, vm);
1049 		drm_exec_retry_on_contention(&exec);
1050 		if (unlikely(ret))
1051 			goto unlock_all;
1052 	}
1053 
1054 	if (invalidated) {
1055 		xa_for_each(&xa, tmp_key, range) {
1056 			bo = range->bo;
1057 			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1058 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1059 			if (ret)
1060 				goto unlock_all;
1061 
1062 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
1063 
1064 			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
1065 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1066 			if (ret)
1067 				goto unlock_all;
1068 		}
1069 		invalidated = false;
1070 	}
1071 
1072 	ret = amdgpu_vm_handle_moved(adev, vm, NULL);
1073 	if (ret)
1074 		goto unlock_all;
1075 
1076 	key = 0;
1077 	/* Validate User Ptr BOs */
1078 	list_for_each_entry(bo_va, &vm->done, base.vm_status) {
1079 		bo = bo_va->base.bo;
1080 		if (!bo)
1081 			continue;
1082 
1083 		if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
1084 			continue;
1085 
1086 		range = xa_load(&xa, key);
1087 		if (range && range->bo != bo) {
1088 			xa_erase(&xa, key);
1089 			amdgpu_hmm_range_free(range);
1090 			range = NULL;
1091 		}
1092 
1093 		if (!range) {
1094 			range = amdgpu_hmm_range_alloc(bo);
1095 			if (!range) {
1096 				ret = -ENOMEM;
1097 				goto unlock_all;
1098 			}
1099 
1100 			xa_store(&xa, key, range, GFP_KERNEL);
1101 			new_addition = true;
1102 		}
1103 		key++;
1104 	}
1105 
1106 	if (new_addition) {
1107 		drm_exec_fini(&exec);
1108 		xa_for_each(&xa, tmp_key, range) {
1109 			if (!range)
1110 				continue;
1111 			bo = range->bo;
1112 			ret = amdgpu_ttm_tt_get_user_pages(bo, range);
1113 			if (ret)
1114 				goto unlock_all;
1115 		}
1116 
1117 		invalidated = true;
1118 		new_addition = false;
1119 		goto retry_lock;
1120 	}
1121 
1122 	ret = amdgpu_vm_update_pdes(adev, vm, false);
1123 	if (ret)
1124 		goto unlock_all;
1125 
1126 	/*
1127 	 * We need to wait for all VM updates to finish before restarting the
1128 	 * queues. Using the done list like that is now ok since everything is
1129 	 * locked in place.
1130 	 */
1131 	list_for_each_entry(bo_va, &vm->done, base.vm_status)
1132 		dma_fence_wait(bo_va->last_pt_update, false);
1133 	dma_fence_wait(vm->last_update, false);
1134 
1135 	ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
1136 	if (ret)
1137 		drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
1138 
1139 unlock_all:
1140 	drm_exec_fini(&exec);
1141 	xa_for_each(&xa, tmp_key, range) {
1142 		if (!range)
1143 			continue;
1144 		bo = range->bo;
1145 		amdgpu_hmm_range_free(range);
1146 	}
1147 	xa_destroy(&xa);
1148 	return ret;
1149 }
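/*
 * Flow summary (a sketch) for amdgpu_userq_vm_validate(): the function loops
 * between locking/validating the VM (retry_lock) and faulting in user pages
 * for userptr BOs.  Each userptr BO found on the done list gets an amdgpu_hmm
 * range; the locks are then dropped to call amdgpu_ttm_tt_get_user_pages(),
 * and the validation is retried with those pages applied (CPU then GTT
 * re-validation) before the PDEs are updated, all PT updates are waited on
 * and the eviction fence is replaced.
 */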
1150 
1151 static void amdgpu_userq_restore_worker(struct work_struct *work)
1152 {
1153 	struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
1154 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1155 	int ret;
1156 
1157 	flush_delayed_work(&fpriv->evf_mgr.suspend_work);
1158 
1159 	mutex_lock(&uq_mgr->userq_mutex);
1160 
1161 	ret = amdgpu_userq_vm_validate(uq_mgr);
1162 	if (ret) {
1163 		drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
1164 		goto unlock;
1165 	}
1166 
1167 	ret = amdgpu_userq_restore_all(uq_mgr);
1168 	if (ret) {
1169 		drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
1170 		goto unlock;
1171 	}
1172 
1173 unlock:
1174 	mutex_unlock(&uq_mgr->userq_mutex);
1175 }
1176 
1177 static int
1178 amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
1179 {
1180 	struct amdgpu_usermode_queue *queue;
1181 	unsigned long queue_id;
1182 	int ret = 0, r;
1183 
1184 	amdgpu_userq_detect_and_reset_queues(uq_mgr);
1185 	/* Try to preempt all the queues in this process ctx */
1186 	xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
1187 		r = amdgpu_userq_preempt_helper(uq_mgr, queue);
1188 		if (r)
1189 			ret = r;
1190 	}
1191 
1192 	if (ret)
1193 		drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
1194 	return ret;
1195 }
1196 
1197 void amdgpu_userq_reset_work(struct work_struct *work)
1198 {
1199 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
1200 						  userq_reset_work);
1201 	struct amdgpu_reset_context reset_context;
1202 
1203 	memset(&reset_context, 0, sizeof(reset_context));
1204 
1205 	reset_context.method = AMD_RESET_METHOD_NONE;
1206 	reset_context.reset_req_dev = adev;
1207 	reset_context.src = AMDGPU_RESET_SRC_USERQ;
1208 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
1209 	/*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/
1210 
1211 	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
1212 }
1213 
1214 static int
1215 amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
1216 {
1217 	struct amdgpu_usermode_queue *queue;
1218 	unsigned long queue_id;
1219 	int ret;
1220 
1221 	xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
1222 		struct dma_fence *f = queue->last_fence;
1223 
1224 		if (!f || dma_fence_is_signaled(f))
1225 			continue;
1226 		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
1227 		if (ret <= 0) {
1228 			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
1229 				     f->context, f->seqno);
1230 			return -ETIMEDOUT;
1231 		}
1232 	}
1233 
1234 	return 0;
1235 }
1236 
1237 void
1238 amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
1239 		   struct amdgpu_eviction_fence *ev_fence)
1240 {
1241 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1242 	struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
1243 	struct amdgpu_device *adev = uq_mgr->adev;
1244 	int ret;
1245 
1246 	/* Wait for any pending userqueue fence work to finish */
1247 	ret = amdgpu_userq_wait_for_signal(uq_mgr);
1248 	if (ret)
1249 		dev_err(adev->dev, "Not evicting userqueue, timeout waiting for work\n");
1250 
1251 	ret = amdgpu_userq_evict_all(uq_mgr);
1252 	if (ret)
1253 		dev_err(adev->dev, "Failed to evict userqueue\n");
1254 
1255 	/* Signal current eviction fence */
1256 	amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
1257 
1258 	if (evf_mgr->fd_closing) {
1259 		cancel_delayed_work_sync(&uq_mgr->resume_work);
1260 		return;
1261 	}
1262 
1263 	/* Schedule a resume work */
1264 	schedule_delayed_work(&uq_mgr->resume_work, 0);
1265 }
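/*
 * Lifecycle note: eviction and restore are paired.  This path waits for
 * outstanding userq fences, preempts every queue and signals the current
 * eviction fence; unless the fd is closing, the delayed resume_work
 * (amdgpu_userq_restore_worker) later re-validates the VM and maps the queues
 * back, replacing the eviction fence along the way.
 */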
1266 
1267 int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
1268 			  struct amdgpu_device *adev)
1269 {
1270 	mutex_init(&userq_mgr->userq_mutex);
1271 	xa_init_flags(&userq_mgr->userq_mgr_xa, XA_FLAGS_ALLOC);
1272 	userq_mgr->adev = adev;
1273 	userq_mgr->file = file_priv;
1274 
1275 	INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
1276 	return 0;
1277 }
1278 
1279 void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
1280 {
1281 	struct amdgpu_usermode_queue *queue;
1282 	unsigned long queue_id;
1283 
1284 	cancel_delayed_work_sync(&userq_mgr->resume_work);
1285 
1286 	mutex_lock(&userq_mgr->userq_mutex);
1287 	amdgpu_userq_detect_and_reset_queues(userq_mgr);
1288 	xa_for_each(&userq_mgr->userq_mgr_xa, queue_id, queue) {
1289 		amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
1290 		amdgpu_userq_unmap_helper(userq_mgr, queue);
1291 		amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
1292 	}
1293 
1294 	xa_destroy(&userq_mgr->userq_mgr_xa);
1295 	mutex_unlock(&userq_mgr->userq_mutex);
1296 	mutex_destroy(&userq_mgr->userq_mutex);
1297 }
1298 
1299 int amdgpu_userq_suspend(struct amdgpu_device *adev)
1300 {
1301 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1302 	struct amdgpu_usermode_queue *queue;
1303 	struct amdgpu_userq_mgr *uqm;
1304 	unsigned long queue_id;
1305 	int r;
1306 
1307 	if (!ip_mask)
1308 		return 0;
1309 
1310 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1311 		uqm = queue->userq_mgr;
1312 		cancel_delayed_work_sync(&uqm->resume_work);
1313 		guard(mutex)(&uqm->userq_mutex);
1314 		amdgpu_userq_detect_and_reset_queues(uqm);
1315 		if (adev->in_s0ix)
1316 			r = amdgpu_userq_preempt_helper(uqm, queue);
1317 		else
1318 			r = amdgpu_userq_unmap_helper(uqm, queue);
1319 		if (r)
1320 			return r;
1321 	}
1322 	return 0;
1323 }
1324 
1325 int amdgpu_userq_resume(struct amdgpu_device *adev)
1326 {
1327 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1328 	struct amdgpu_usermode_queue *queue;
1329 	struct amdgpu_userq_mgr *uqm;
1330 	unsigned long queue_id;
1331 	int r;
1332 
1333 	if (!ip_mask)
1334 		return 0;
1335 
1336 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1337 		uqm = queue->userq_mgr;
1338 		guard(mutex)(&uqm->userq_mutex);
1339 		if (adev->in_s0ix)
1340 			r = amdgpu_userq_restore_helper(uqm, queue);
1341 		else
1342 			r = amdgpu_userq_map_helper(uqm, queue);
1343 		if (r)
1344 			return r;
1345 	}
1346 
1347 	return 0;
1348 }
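/*
 * Note: suspend/resume take a lighter path for S0ix, where queues are only
 * preempted and later restored, while a full suspend unmaps and remaps them;
 * the split presumably reflects how much queue state survives each power
 * transition.
 */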
1349 
1350 int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
1351 						  u32 idx)
1352 {
1353 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1354 	struct amdgpu_usermode_queue *queue;
1355 	struct amdgpu_userq_mgr *uqm;
1356 	unsigned long queue_id;
1357 	int ret = 0, r;
1358 
1359 	/* only need to stop gfx/compute */
1360 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
1361 		return 0;
1362 
1363 	if (adev->userq_halt_for_enforce_isolation)
1364 		dev_warn(adev->dev, "userq scheduling already stopped!\n");
1365 	adev->userq_halt_for_enforce_isolation = true;
1366 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1367 		uqm = queue->userq_mgr;
1368 		cancel_delayed_work_sync(&uqm->resume_work);
1369 		mutex_lock(&uqm->userq_mutex);
1370 		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1371 		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1372 		    (queue->xcp_id == idx)) {
1373 			amdgpu_userq_detect_and_reset_queues(uqm);
1374 			r = amdgpu_userq_preempt_helper(uqm, queue);
1375 			if (r)
1376 				ret = r;
1377 		}
1378 		mutex_unlock(&uqm->userq_mutex);
1379 	}
1380 
1381 	return ret;
1382 }
1383 
1384 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
1385 						   u32 idx)
1386 {
1387 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1388 	struct amdgpu_usermode_queue *queue;
1389 	struct amdgpu_userq_mgr *uqm;
1390 	unsigned long queue_id;
1391 	int ret = 0, r;
1392 
1393 	/* only need to start gfx/compute */
1394 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
1395 		return 0;
1396 
1397 	if (!adev->userq_halt_for_enforce_isolation)
1398 		dev_warn(adev->dev, "userq scheduling already started!\n");
1399 	adev->userq_halt_for_enforce_isolation = false;
1400 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1401 		uqm = queue->userq_mgr;
1402 		mutex_lock(&uqm->userq_mutex);
1403 		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1404 		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1405 		    (queue->xcp_id == idx)) {
1406 			r = amdgpu_userq_restore_helper(uqm, queue);
1407 			if (r)
1408 				ret = r;
1409 		}
1410 		mutex_unlock(&uqm->userq_mutex);
1411 	}
1412 
1413 	return ret;
1414 }
1415 
1416 int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
1417 				       struct amdgpu_bo_va_mapping *mapping,
1418 				       uint64_t saddr)
1419 {
1420 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1421 	struct amdgpu_bo_va *bo_va = mapping->bo_va;
1422 	struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
1423 	int ret = 0;
1424 
1425 	if (!ip_mask)
1426 		return 0;
1427 
1428 	dev_warn_once(adev->dev, "unmapping a VA still in use by a user queue: 0x%llx\n", saddr);
1429 	/*
1430 	 * The userq VA mapping reservation should include the eviction fence.
1431 	 * If the eviction fence cannot signal successfully during unmapping,
1432 	 * the driver warns to flag this improper unmap of the userq VA.
1433 	 * Note: the eviction fence may be attached to different BOs, and this
1434 	 * unmap only covers one kind of userq VA, so at this point assume
1435 	 * the eviction fence is always unsignaled.
1436 	 */
1437 	if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
1438 		ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
1439 					    MAX_SCHEDULE_TIMEOUT);
1440 		if (ret <= 0)
1441 			return -EBUSY;
1442 	}
1443 
1444 	return 0;
1445 }
1446 
1447 void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
1448 {
1449 	const struct amdgpu_userq_funcs *userq_funcs;
1450 	struct amdgpu_usermode_queue *queue;
1451 	struct amdgpu_userq_mgr *uqm;
1452 	unsigned long queue_id;
1453 
1454 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1455 		uqm = queue->userq_mgr;
1456 		cancel_delayed_work_sync(&uqm->resume_work);
1457 		if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
1458 			amdgpu_userq_wait_for_last_fence(uqm, queue);
1459 			userq_funcs = adev->userq_funcs[queue->queue_type];
1460 			userq_funcs->unmap(uqm, queue);
1461 			/* Just mark all queues as hung at this point.
1462 			 * If the unmap succeeds, we can map them again
1463 			 * in amdgpu_userq_post_reset() if VRAM is not lost.
1464 			 */
1465 			queue->state = AMDGPU_USERQ_STATE_HUNG;
1466 			amdgpu_userq_fence_driver_force_completion(queue);
1467 		}
1468 	}
1469 }
1470 
1471 int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost)
1472 {
1473 	/* Any queue left in AMDGPU_USERQ_STATE_HUNG by amdgpu_userq_pre_reset()
1474 	 * at this point should be mappable again, and can continue,
1475 	 * provided VRAM is not lost.
1476 	 */
1477 	struct amdgpu_userq_mgr *uqm;
1478 	struct amdgpu_usermode_queue *queue;
1479 	const struct amdgpu_userq_funcs *userq_funcs;
1480 	unsigned long queue_id;
1481 	int r = 0;
1482 
1483 	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1484 		uqm = queue->userq_mgr;
1485 		if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
1486 			userq_funcs = adev->userq_funcs[queue->queue_type];
1487 			/* Re-map queue */
1488 			r = userq_funcs->map(uqm, queue);
1489 			if (r) {
1490 				dev_err(adev->dev, "Failed to remap queue %ld\n", queue_id);
1491 				continue;
1492 			}
1493 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
1494 		}
1495 	}
1496 
1497 	return r;
1498 }
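/*
 * Reset path summary: amdgpu_userq_pre_reset() unmaps every mapped queue,
 * marks it AMDGPU_USERQ_STATE_HUNG and force-completes its fences; after the
 * GPU reset, amdgpu_userq_post_reset() remaps those hung queues only when
 * VRAM contents survived, otherwise they are left in the hung state.
 */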
1499