/* xref: /linux/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0) */
// SPDX-License-Identifier: MIT
/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "mes_userqueue.h"
#include "amdgpu_userq_fence.h"

#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE

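/*
 * Bind a GTT BO into the GART so the MES firmware can reach it through a
 * GART address; an extra reference is taken so the BO stays around while
 * it is mapped.
 */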
static int
mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
{
	int ret;

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
		goto err_reserve_bo_failed;
	}

	ret = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (ret) {
		DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
		goto err_map_bo_gart_failed;
	}

	amdgpu_bo_unreserve(bo);
	bo = amdgpu_bo_ref(bo);

	return 0;

err_map_bo_gart_failed:
	amdgpu_bo_unreserve(bo);
err_reserve_bo_failed:
	return ret;
}

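/*
 * Look up the user-provided WPTR VA in the queue's VM, check that the
 * backing BO fits in a single page and map it into the GART. The resulting
 * GART address is cached in queue->wptr_obj.gpu_addr for the MES firmware.
 */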
static int
mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr,
			      struct amdgpu_usermode_queue *queue,
			      uint64_t wptr)
{
	struct amdgpu_bo_va_mapping *wptr_mapping;
	struct amdgpu_vm *wptr_vm;
	struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
	int ret;

	wptr_vm = queue->vm;
	ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
	if (ret)
		return ret;

	wptr &= AMDGPU_GMC_HOLE_MASK;
	wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
	amdgpu_bo_unreserve(wptr_vm->root.bo);
	if (!wptr_mapping) {
		DRM_ERROR("Failed to lookup wptr bo\n");
		return -EINVAL;
	}

	wptr_obj->obj = wptr_mapping->bo_va->base.bo;
	if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
		DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
		return -EINVAL;
	}

	ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
	if (ret) {
		DRM_ERROR("Failed to map wptr bo to GART\n");
		return ret;
	}

	queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset_no_check(wptr_obj->obj);
	return 0;
}

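/* Translate the UAPI queue priority flags into MES priority levels. */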
static int convert_to_mes_priority(int priority)
{
	switch (priority) {
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW:
	default:
		return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW:
		return AMDGPU_MES_PRIORITY_LEVEL_LOW;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH:
		return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH:
		return AMDGPU_MES_PRIORITY_LEVEL_HIGH;
	}
}

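/*
 * Map the user queue in hardware through the MES add_hw_queue callback,
 * using the process/gang context space allocated for this queue.
 */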
static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr,
			 struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
	struct mes_add_queue_input queue_input;
	int r;

	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));

	queue_input.process_va_start = 0;
	queue_input.process_va_end = adev->vm_manager.max_pfn - 1;

	/* set process quantum to 10 ms and gang quantum to 1 ms by default (values in 100 ns units) */
	queue_input.process_quantum = 100000;
	queue_input.gang_quantum = 10000;
	queue_input.paging = false;

	queue_input.process_context_addr = ctx->gpu_addr;
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
	queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority);

	queue_input.process_id = queue->vm->pasid;
	queue_input.queue_type = queue->queue_type;
	queue_input.mqd_addr = queue->mqd.gpu_addr;
	queue_input.wptr_addr = userq_props->wptr_gpu_addr;
	queue_input.queue_size = userq_props->queue_size >> 2;
	queue_input.doorbell_offset = userq_props->doorbell_index;
	queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
	queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
		return r;
	}

	DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
	return 0;
}

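/* Unmap the user queue in hardware through the MES remove_hw_queue callback. */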
static int mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr,
			   struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_remove_queue_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r;

	memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
	queue_input.doorbell_offset = queue->doorbell_index;
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
	return r;
}

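/* Allocate the per-queue process and gang context space required by the MES FW. */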
static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
				      struct amdgpu_usermode_queue *queue,
				      struct drm_amdgpu_userq_in *mqd_user)
{
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r, size;

	/*
	 * The FW expects at least one page of space each for the
	 * process ctx and the gang ctx. Create a single object
	 * covering both.
	 */
	size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
	r = amdgpu_userq_create_object(uq_mgr, ctx, size);
	if (r) {
		DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
		return r;
	}

	return 0;
}

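/*
 * Query the MES firmware for hung queues of the given type and mark any
 * matching user queues as hung, forcing completion of their fences and
 * signalling a wedged (no recovery) event to userspace.
 */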
static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
				      int queue_type)
{
	int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	unsigned int hung_db_num = 0;
	int queue_id, r, i;
	u32 db_array[4];

	if (db_array_size > 4) {
		dev_err(adev->dev, "DB array size (%d vs 4) too small\n",
			db_array_size);
		return -EINVAL;
	}

	amdgpu_mes_lock(&adev->mes);
	r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false,
						    &hung_db_num, db_array);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
	} else if (hung_db_num) {
		list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
			idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
				if (queue->queue_type == queue_type) {
					for (i = 0; i < hung_db_num; i++) {
						if (queue->doorbell_index == db_array[i]) {
							queue->state = AMDGPU_USERQ_STATE_HUNG;
							atomic_inc(&adev->gpu_reset_counter);
							amdgpu_userq_fence_driver_force_completion(queue);
							drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
						}
					}
				}
			}
		}
	}

	return r;
}

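/*
 * Create the MQD for a user queue: allocate the MQD BO, fill it from the
 * user-provided per-IP MQD data (compute/gfx/sdma), allocate the FW context
 * space and map the WPTR BO into the GART.
 */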
static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
				struct drm_amdgpu_userq_in *args_in,
				struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
	struct drm_amdgpu_userq_in *mqd_user = args_in;
	struct amdgpu_mqd_prop *userq_props;
	struct amdgpu_gfx_shadow_info shadow_info;
	int r;

	/* Structure to initialize MQD for userqueue using generic MQD init function */
	userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
	if (!userq_props) {
		DRM_ERROR("Failed to allocate memory for userq_props\n");
		return -ENOMEM;
	}

	r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
	if (r) {
		DRM_ERROR("Failed to create MQD object for userqueue\n");
		goto free_props;
	}

	/* Initialize the MQD BO with user-provided values */
	userq_props->wptr_gpu_addr = mqd_user->wptr_va;
	userq_props->rptr_gpu_addr = mqd_user->rptr_va;
	userq_props->queue_size = mqd_user->queue_size;
	userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
	userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
	userq_props->use_doorbell = true;
	userq_props->doorbell_index = queue->doorbell_index;
	userq_props->fence_address = queue->fence_drv->gpu_addr;

	if (adev->gfx.funcs->get_gfx_shadow_info)
		adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
	if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
		struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;

		if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
			DRM_ERROR("Invalid compute IP MQD size\n");
			r = -EINVAL;
			goto free_mqd;
		}

		compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(compute_mqd)) {
			DRM_ERROR("Failed to read user MQD\n");
			r = PTR_ERR(compute_mqd);
			goto free_mqd;
		}

		r = amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
						   max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE));
		if (r) {
			kfree(compute_mqd);
			goto free_mqd;
		}

		userq_props->eop_gpu_addr = compute_mqd->eop_va;
		userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
		userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
		userq_props->hqd_active = false;
		userq_props->tmz_queue =
			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
		kfree(compute_mqd);
	} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
		struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;

		if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
			DRM_ERROR("Invalid GFX MQD\n");
			r = -EINVAL;
			goto free_mqd;
		}

		mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(mqd_gfx_v11)) {
			DRM_ERROR("Failed to read user MQD\n");
			r = PTR_ERR(mqd_gfx_v11);
			goto free_mqd;
		}

		userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
		userq_props->csa_addr = mqd_gfx_v11->csa_va;
		userq_props->tmz_queue =
			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;

		r = amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
						   shadow_info.shadow_size);
		if (r) {
			kfree(mqd_gfx_v11);
			goto free_mqd;
		}

		kfree(mqd_gfx_v11);
	} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
		struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;

		if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
			DRM_ERROR("Invalid SDMA MQD\n");
			r = -EINVAL;
			goto free_mqd;
		}

		mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(mqd_sdma_v11)) {
			DRM_ERROR("Failed to read sdma user MQD\n");
			r = PTR_ERR(mqd_sdma_v11);
			goto free_mqd;
		}

		r = amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
						   shadow_info.csa_size);
		if (r) {
			kfree(mqd_sdma_v11);
			goto free_mqd;
		}

		userq_props->csa_addr = mqd_sdma_v11->csa_va;
		kfree(mqd_sdma_v11);
	}

	queue->userq_prop = userq_props;

	r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
	if (r) {
		DRM_ERROR("Failed to initialize MQD for userqueue\n");
		goto free_mqd;
	}

	/* Create BO for FW operations */
	r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user);
	if (r) {
		DRM_ERROR("Failed to allocate BO for userqueue (%d)\n", r);
		goto free_mqd;
	}

	/* FW expects WPTR BOs to be mapped into GART */
	r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr);
	if (r) {
		DRM_ERROR("Failed to create WPTR mapping\n");
		goto free_ctx;
	}

	return 0;

free_ctx:
	amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);

free_mqd:
	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);

free_props:
	kfree(userq_props);

	return r;
}

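/* Free the FW context space, queue properties and MQD BO of a user queue. */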
static void
mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
		      struct amdgpu_usermode_queue *queue)
{
	amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
	kfree(queue->userq_prop);
	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
}

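/*
 * Preempt a mapped user queue by suspending its gang through the MES and
 * busy-waiting (up to ~2.1 s) on a writeback fence for the firmware ack.
 */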
static int mes_userq_preempt(struct amdgpu_userq_mgr *uq_mgr,
				struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_suspend_gang_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	signed long timeout = 2100000; /* 2100 ms */
	u64 fence_gpu_addr;
	u32 fence_offset;
	u64 *fence_ptr;
	int i, r;

	if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
		return 0;
	r = amdgpu_device_wb_get(adev, &fence_offset);
	if (r)
		return r;

	fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4);
	fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
	*fence_ptr = 0;

	memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input));
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
	queue_input.suspend_fence_addr = fence_gpu_addr;
	queue_input.suspend_fence_value = 1;
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		DRM_ERROR("Failed to suspend gang: %d\n", r);
		goto out;
	}

	for (i = 0; i < timeout; i++) {
		if (*fence_ptr == 1)
			goto out;
		udelay(1);
	}
	r = -ETIMEDOUT;

out:
	amdgpu_device_wb_free(adev, fence_offset);
	return r;
}

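/*
 * Resume a previously preempted gang through the MES; hung queues are
 * rejected with -EINVAL.
 */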
static int mes_userq_restore(struct amdgpu_userq_mgr *uq_mgr,
				struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_resume_gang_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r;

	if (queue->state == AMDGPU_USERQ_STATE_HUNG)
		return -EINVAL;
	if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED)
		return 0;

	memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input));
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r);
	return r;
}

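/* User queue callbacks for ASICs that manage queues through the MES. */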
const struct amdgpu_userq_funcs userq_mes_funcs = {
	.mqd_create = mes_userq_mqd_create,
	.mqd_destroy = mes_userq_mqd_destroy,
	.unmap = mes_userq_unmap,
	.map = mes_userq_map,
	.detect_and_reset = mes_userq_detect_and_reset,
	.preempt = mes_userq_preempt,
	.restore = mes_userq_restore,
};