xref: /linux/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c (revision 92c4c9fdc838d3b41a996bb700ea64b9e78fc7ea)
// SPDX-License-Identifier: MIT
/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "mes_userqueue.h"
#include "amdgpu_userq_fence.h"

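/*
 * The MES firmware expects at least a page each for the per-process and
 * per-gang context. Both are carved out of a single fw_obj allocation,
 * with the gang context placed directly after the process context (see
 * mes_userq_create_ctx_space() and mes_userq_map() below).
 */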
#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE

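/*
 * Bind a GTT BO into the GART so it has a fixed GART address the MES
 * firmware can use, and take an extra reference on the BO.
 */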
static int
mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
{
	int ret;

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
		goto err_reserve_bo_failed;
	}

	ret = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (ret) {
		DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
		goto err_map_bo_gart_failed;
	}

	amdgpu_bo_unreserve(bo);
	bo = amdgpu_bo_ref(bo);

	return 0;

err_map_bo_gart_failed:
	amdgpu_bo_unreserve(bo);
err_reserve_bo_failed:
	return ret;
}

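/*
 * Look up the BO backing the user-supplied WPTR virtual address, bind it
 * into the GART and pin it in GTT so the firmware can read the write
 * pointer directly. Only WPTR BOs of at most one page are accepted.
 */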
static int
mes_userq_create_wptr_mapping(struct amdgpu_device *adev,
			      struct amdgpu_userq_mgr *uq_mgr,
			      struct amdgpu_usermode_queue *queue,
			      uint64_t wptr)
{
	struct amdgpu_bo_va_mapping *wptr_mapping;
	struct amdgpu_vm *wptr_vm;
	struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
	int ret;

	wptr_vm = queue->vm;
	ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
	if (ret)
		return ret;

	wptr &= AMDGPU_GMC_HOLE_MASK;
	wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
	amdgpu_bo_unreserve(wptr_vm->root.bo);
	if (!wptr_mapping) {
		DRM_ERROR("Failed to lookup wptr bo\n");
		return -EINVAL;
	}

	wptr_obj->obj = wptr_mapping->bo_va->base.bo;
	if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
		DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
		return -EINVAL;
	}

	ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
	if (ret) {
		DRM_ERROR("Failed to map wptr bo to GART\n");
		return ret;
	}

	ret = amdgpu_bo_reserve(wptr_obj->obj, true);
	if (ret) {
		DRM_ERROR("Failed to reserve wptr bo\n");
		return ret;
	}

	/* TODO use eviction fence instead of pinning. */
	ret = amdgpu_bo_pin(wptr_obj->obj, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin wptr bo\n");
		goto unresv_bo;
	}

	queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset(wptr_obj->obj);
	amdgpu_bo_unreserve(wptr_obj->obj);

	return 0;

unresv_bo:
	amdgpu_bo_unreserve(wptr_obj->obj);
	return ret;
}

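/*
 * Translate a userqueue UAPI priority flag into the matching MES
 * priority level; anything unrecognized falls back to normal priority.
 */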
static int convert_to_mes_priority(int priority)
{
	switch (priority) {
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW:
	default:
		return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW:
		return AMDGPU_MES_PRIORITY_LEVEL_LOW;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH:
		return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH:
		return AMDGPU_MES_PRIORITY_LEVEL_HIGH;
	}
}

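/*
 * Map the queue into hardware: build a mes_add_queue_input from the
 * queue's MQD properties, context object and VM, then submit it to the
 * MES firmware via add_hw_queue() under the MES lock.
 */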
static int mes_userq_map(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
	struct mes_add_queue_input queue_input;
	int r;

	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));

	queue_input.process_va_start = 0;
	queue_input.process_va_end = adev->vm_manager.max_pfn - 1;

	/* set process quantum to 10 ms and gang quantum to 1 ms as default;
	 * the MES quantum values are expressed in 100 ns units
	 */
	queue_input.process_quantum = 100000;
	queue_input.gang_quantum = 10000;
	queue_input.paging = false;

	queue_input.process_context_addr = ctx->gpu_addr;
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
	queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority);

	queue_input.process_id = queue->vm->pasid;
	queue_input.queue_type = queue->queue_type;
	queue_input.mqd_addr = queue->mqd.gpu_addr;
	queue_input.wptr_addr = userq_props->wptr_gpu_addr;
	queue_input.queue_size = userq_props->queue_size >> 2;
	queue_input.doorbell_offset = userq_props->doorbell_index;
	queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
	queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
		return r;
	}

	DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
	return 0;
}

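/*
 * Unmap the queue from hardware: the doorbell offset and gang context
 * address identify the queue to the MES remove_hw_queue() call.
 */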
static int mes_userq_unmap(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_remove_queue_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r;

	memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
	queue_input.doorbell_offset = queue->doorbell_index;
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
	return r;
}

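/*
 * Allocate the combined process + gang context object that the map and
 * unmap paths above hand to the MES firmware.
 */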
static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
				      struct amdgpu_usermode_queue *queue,
				      struct drm_amdgpu_userq_in *mqd_user)
{
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r, size;

	/*
	 * The FW expects at least one page space allocated for
	 * process ctx and gang ctx each. Create an object
	 * for the same.
	 */
	size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
	r = amdgpu_userq_create_object(uq_mgr, ctx, size);
	if (r) {
		DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
		return r;
	}

	return 0;
}

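/*
 * Let the MES firmware detect and reset hung queues of the given type.
 * Every user queue whose doorbell shows up in the returned array is
 * marked hung, gets its fences force-completed and raises a wedged
 * event; scheduling is resumed afterwards if any hung queue was found.
 */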
static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
				      int queue_type)
{
	int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
	struct mes_detect_and_reset_queue_input input;
	struct amdgpu_usermode_queue *queue;
	unsigned int hung_db_num = 0;
	unsigned long queue_id;
	u32 db_array[8];
	bool found_hung_queue = false;
	int r, i;

	if (db_array_size > 8) {
		dev_err(adev->dev, "DB array size (%d vs 8) too small\n",
			db_array_size);
		return -EINVAL;
	}

	memset(&input, 0x0, sizeof(struct mes_detect_and_reset_queue_input));

	input.queue_type = queue_type;

	amdgpu_mes_lock(&adev->mes);
	r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false,
						    &hung_db_num, db_array, 0);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
	} else if (hung_db_num) {
		xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
			if (queue->queue_type == queue_type) {
				for (i = 0; i < hung_db_num; i++) {
					if (queue->doorbell_index == db_array[i]) {
						queue->state = AMDGPU_USERQ_STATE_HUNG;
						found_hung_queue = true;
						atomic_inc(&adev->gpu_reset_counter);
						amdgpu_userq_fence_driver_force_completion(queue);
						drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
					}
				}
			}
		}
	}

	if (found_hung_queue) {
		/* Resume scheduling after hang recovery */
		r = amdgpu_mes_resume(adev);
	}

	return r;
}

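/*
 * Create and initialize the MQD for a user queue: allocate the MQD BO,
 * copy the per-IP MQD metadata from userspace (validating every
 * user-provided virtual address against the queue's VM), run the
 * generic per-IP init_mqd() callback, then set up the FW context space
 * and the GART-mapped WPTR needed by the MES firmware.
 */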
static int mes_userq_mqd_create(struct amdgpu_usermode_queue *queue,
				struct drm_amdgpu_userq_in *args_in)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
	struct drm_amdgpu_userq_in *mqd_user = args_in;
	struct amdgpu_mqd_prop *userq_props;
	int r;

	/* Structure to initialize MQD for userqueue using generic MQD init function */
	userq_props = kzalloc_obj(struct amdgpu_mqd_prop);
	if (!userq_props) {
		DRM_ERROR("Failed to allocate memory for userq_props\n");
		return -ENOMEM;
	}

	r = amdgpu_userq_create_object(uq_mgr, &queue->mqd,
			AMDGPU_MQD_SIZE_ALIGN(mqd_hw_default->mqd_size));
	if (r) {
		DRM_ERROR("Failed to create MQD object for userqueue\n");
		goto free_props;
	}

	/* Initialize the MQD BO with user given values */
	userq_props->wptr_gpu_addr = mqd_user->wptr_va;
	userq_props->rptr_gpu_addr = mqd_user->rptr_va;
	userq_props->queue_size = mqd_user->queue_size;
	userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
	userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
	userq_props->use_doorbell = true;
	userq_props->doorbell_index = queue->doorbell_index;
	userq_props->fence_address = queue->fence_drv->gpu_addr;

	if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
		struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;

		if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
			DRM_ERROR("Invalid compute IP MQD size\n");
			r = -EINVAL;
			goto free_mqd;
		}

		compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(compute_mqd)) {
			DRM_ERROR("Failed to read user MQD\n");
			r = -ENOMEM;
			goto free_mqd;
		}

		r = amdgpu_bo_reserve(queue->vm->root.bo, false);
		if (r) {
			kfree(compute_mqd);
			goto free_mqd;
		}
		r = amdgpu_userq_input_va_validate(adev, queue, compute_mqd->eop_va,
						   2048);
		amdgpu_bo_unreserve(queue->vm->root.bo);
		if (r) {
			kfree(compute_mqd);
			goto free_mqd;
		}

		userq_props->eop_gpu_addr = compute_mqd->eop_va;
		userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
		userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
		userq_props->hqd_active = false;
		userq_props->tmz_queue =
			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
		kfree(compute_mqd);
	} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
		struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
		struct amdgpu_gfx_shadow_info shadow_info;

		if (adev->gfx.funcs->get_gfx_shadow_info) {
			adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
		} else {
			r = -EINVAL;
			goto free_mqd;
		}

		if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
			DRM_ERROR("Invalid GFX MQD\n");
			r = -EINVAL;
			goto free_mqd;
		}

		mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(mqd_gfx_v11)) {
			DRM_ERROR("Failed to read user MQD\n");
			r = -ENOMEM;
			goto free_mqd;
		}

		userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
		userq_props->csa_addr = mqd_gfx_v11->csa_va;
		userq_props->tmz_queue =
			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;

		r = amdgpu_bo_reserve(queue->vm->root.bo, false);
		if (r) {
			kfree(mqd_gfx_v11);
			goto free_mqd;
		}
		r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->shadow_va,
						   shadow_info.shadow_size);
		if (r) {
			amdgpu_bo_unreserve(queue->vm->root.bo);
			kfree(mqd_gfx_v11);
			goto free_mqd;
		}

		r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->csa_va,
						   shadow_info.csa_size);
		amdgpu_bo_unreserve(queue->vm->root.bo);
		if (r) {
			kfree(mqd_gfx_v11);
			goto free_mqd;
		}

		kfree(mqd_gfx_v11);
	} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
		struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;

		if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
			DRM_ERROR("Invalid SDMA MQD\n");
			r = -EINVAL;
			goto free_mqd;
		}

		mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(mqd_sdma_v11)) {
			DRM_ERROR("Failed to read sdma user MQD\n");
			r = -ENOMEM;
			goto free_mqd;
		}

		r = amdgpu_bo_reserve(queue->vm->root.bo, false);
		if (r) {
			kfree(mqd_sdma_v11);
			goto free_mqd;
		}
		r = amdgpu_userq_input_va_validate(adev, queue, mqd_sdma_v11->csa_va,
						   32);
		amdgpu_bo_unreserve(queue->vm->root.bo);
		if (r) {
			kfree(mqd_sdma_v11);
			goto free_mqd;
		}

		userq_props->csa_addr = mqd_sdma_v11->csa_va;
		kfree(mqd_sdma_v11);
	}

	queue->userq_prop = userq_props;

	r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
	if (r) {
		DRM_ERROR("Failed to initialize MQD for userqueue\n");
		goto free_mqd;
	}

	/* Create BO for FW operations */
	r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user);
	if (r) {
		DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
		goto free_mqd;
	}

	/* FW expects WPTR BOs to be mapped into GART */
	r = mes_userq_create_wptr_mapping(adev, uq_mgr, queue, userq_props->wptr_gpu_addr);
	if (r) {
		DRM_ERROR("Failed to create WPTR mapping\n");
		goto free_ctx;
	}

	return 0;

free_ctx:
	amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);

free_mqd:
	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);

free_props:
	kfree(userq_props);

	return r;
}

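/*
 * Tear down what mes_userq_mqd_create() set up: the FW context object,
 * the MQD properties and the MQD itself.
 */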
static void mes_userq_mqd_destroy(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;

	amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
	kfree(queue->userq_prop);
	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
}

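/*
 * Preempt a mapped queue by suspending its gang. MES signals completion
 * by writing a fence value to a writeback slot, which is polled here
 * for up to ~2.1 seconds.
 */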
static int mes_userq_preempt(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_suspend_gang_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	signed long timeout = 2100000; /* 2100 ms */
	u64 fence_gpu_addr;
	u32 fence_offset;
	u64 *fence_ptr;
	int i, r;

	if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
		return 0;

	r = amdgpu_device_wb_get(adev, &fence_offset);
	if (r)
		return r;

	fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4);
	fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
	*fence_ptr = 0;

	memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input));
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
	queue_input.suspend_fence_addr = fence_gpu_addr;
	queue_input.suspend_fence_value = 1;
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		DRM_ERROR("Failed to suspend gang: %d\n", r);
		goto out;
	}

	for (i = 0; i < timeout; i++) {
		if (*fence_ptr == 1)
			goto out;
		udelay(1);
	}
	r = -ETIMEDOUT;

out:
	amdgpu_device_wb_free(adev, fence_offset);
	return r;
}

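/*
 * Undo a preemption by resuming the gang. Hung queues cannot be
 * restored; queues that are not preempted need no action.
 */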
static int mes_userq_restore(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_resume_gang_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r;

	if (queue->state == AMDGPU_USERQ_STATE_HUNG)
		return -EINVAL;
	if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED)
		return 0;

	memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input));
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r);
	return r;
}

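/*
 * MES-backed implementation of the generic userqueue hooks; the core
 * userqueue code calls into these through amdgpu_userq_funcs.
 */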
const struct amdgpu_userq_funcs userq_mes_funcs = {
	.mqd_create = mes_userq_mqd_create,
	.mqd_destroy = mes_userq_mqd_destroy,
	.unmap = mes_userq_unmap,
	.map = mes_userq_map,
	.detect_and_reset = mes_userq_detect_and_reset,
	.preempt = mes_userq_preempt,
	.restore = mes_userq_restore,
};