// SPDX-License-Identifier: MIT
/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "mes_userqueue.h"
#include "amdgpu_userq_fence.h"

#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE

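/*
 * mes_userq_create_wptr_mapping - resolve and GART-map a user queue's WPTR
 *
 * Looks up the GEM object backing the user-supplied WPTR virtual address in
 * the queue's VM, takes a reference on it, pins it into GTT and binds it to
 * GART so the firmware can access the write pointer. The resulting GPU
 * address is stored in queue->wptr_obj.gpu_addr. The backing BO must not be
 * larger than a single page.
 */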
static int
mes_userq_create_wptr_mapping(struct amdgpu_device *adev,
			      struct amdgpu_userq_mgr *uq_mgr,
			      struct amdgpu_usermode_queue *queue,
			      uint64_t wptr)
{
	struct amdgpu_bo_va_mapping *wptr_mapping;
	struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
	struct amdgpu_bo *obj;
	struct amdgpu_vm *vm = queue->vm;
	struct drm_exec exec;
	int ret;

	wptr &= AMDGPU_GMC_HOLE_MASK;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 2);
	drm_exec_until_all_locked(&exec) {
		ret = amdgpu_vm_lock_pd(vm, &exec, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret))
			goto fail_lock;

		wptr_mapping = amdgpu_vm_bo_lookup_mapping(vm, wptr >> PAGE_SHIFT);
		if (!wptr_mapping) {
			ret = -EINVAL;
			goto fail_lock;
		}

		obj = wptr_mapping->bo_va->base.bo;
		ret = drm_exec_lock_obj(&exec, &obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret))
			goto fail_lock;
	}

	wptr_obj->obj = amdgpu_bo_ref(wptr_mapping->bo_va->base.bo);
	if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
		ret = -EINVAL;
		goto fail_map;
	}

	/* TODO use eviction fence instead of pinning. */
	ret = amdgpu_bo_pin(wptr_obj->obj, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		DRM_ERROR("Failed to pin wptr bo. ret %d\n", ret);
		goto fail_map;
	}

	ret = amdgpu_ttm_alloc_gart(&wptr_obj->obj->tbo);
	if (ret) {
		DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
		goto fail_map;
	}

	queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset(wptr_obj->obj);

	drm_exec_fini(&exec);
	return 0;

fail_map:
	amdgpu_bo_unref(&wptr_obj->obj);
fail_lock:
	drm_exec_fini(&exec);
	return ret;
}

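/*
 * convert_to_mes_priority - translate a userq create-flag priority level
 * into the matching AMDGPU_MES_PRIORITY_LEVEL_* value. Unknown values fall
 * back to the normal priority level.
 */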
static int convert_to_mes_priority(int priority)
{
	switch (priority) {
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW:
	default:
		return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW:
		return AMDGPU_MES_PRIORITY_LEVEL_LOW;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH:
		return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH:
		return AMDGPU_MES_PRIORITY_LEVEL_HIGH;
	}
}

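/*
 * mes_userq_map - map a user queue onto the hardware through MES
 *
 * Fills a mes_add_queue_input descriptor from the queue's MQD properties
 * (process/gang context addresses, MQD, WPTR and page table addresses,
 * doorbell and priority) and asks the MES firmware to add the hardware
 * queue. The queue size is converted from bytes to dwords (>> 2) before
 * being handed to MES.
 */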
static int mes_userq_map(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
	struct mes_add_queue_input queue_input;
	int r;

	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));

	queue_input.process_va_start = 0;
	queue_input.process_va_end = adev->vm_manager.max_pfn - 1;

	/* set process quantum to 10 ms and gang quantum to 1 ms as default
	 * (the quantum values are expressed in 100 ns units)
	 */
	queue_input.process_quantum = 100000;
	queue_input.gang_quantum = 10000;
	queue_input.paging = false;

	queue_input.process_context_addr = ctx->gpu_addr;
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
	queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority);

	queue_input.process_id = queue->vm->pasid;
	queue_input.queue_type = queue->queue_type;
	queue_input.mqd_addr = queue->mqd.gpu_addr;
	queue_input.wptr_addr = userq_props->wptr_gpu_addr;
	queue_input.queue_size = userq_props->queue_size >> 2;
	queue_input.doorbell_offset = userq_props->doorbell_index;
	queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
	queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
		return r;
	}

	DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
	return 0;
}

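/*
 * mes_userq_unmap - remove a user queue from the hardware
 *
 * Builds a mes_remove_queue_input with the queue's doorbell offset and gang
 * context address and asks the MES firmware to remove the hardware queue.
 */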
static int mes_userq_unmap(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_remove_queue_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r;

	memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
	queue_input.doorbell_offset = queue->doorbell_index;
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
	return r;
}

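/*
 * mes_userq_create_ctx_space - allocate the firmware context object
 *
 * Allocates a single BO holding the per-process context page followed by
 * the per-gang context page that the firmware requires for a queue.
 */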
static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
				      struct amdgpu_usermode_queue *queue,
				      struct drm_amdgpu_userq_in *mqd_user)
{
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r, size;

	/*
	 * The FW expects at least one page of space allocated for the
	 * process ctx and the gang ctx each, so create a single object
	 * large enough to hold both.
	 */
	size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
	r = amdgpu_userq_create_object(uq_mgr, ctx, size);
	if (r) {
		DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
		return r;
	}

	return 0;
}

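/*
 * mes_userq_detect_and_reset - detect and reset hung user queues
 *
 * Asks MES for the doorbells of hung queues of the given type, marks every
 * matching user queue as hung, force-completes its fences and emits a
 * wedged event, then resumes MES scheduling if any hung queue was found.
 */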
static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
				      int queue_type)
{
	int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
	struct mes_detect_and_reset_queue_input input;
	struct amdgpu_usermode_queue *queue;
	unsigned int hung_db_num = 0;
	unsigned long queue_id;
	u32 db_array[8];
	bool found_hung_queue = false;
	int r, i;

	if (db_array_size > 8) {
		dev_err(adev->dev, "DB array size (%d) larger than the local buffer (8)\n",
			db_array_size);
		return -EINVAL;
	}

	memset(&input, 0x0, sizeof(struct mes_detect_and_reset_queue_input));

	input.queue_type = queue_type;

	amdgpu_mes_lock(&adev->mes);
	r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false,
						    &hung_db_num, db_array, 0);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
	} else if (hung_db_num) {
		xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
			if (queue->queue_type == queue_type) {
				for (i = 0; i < hung_db_num; i++) {
					if (queue->doorbell_index == db_array[i]) {
						queue->state = AMDGPU_USERQ_STATE_HUNG;
						found_hung_queue = true;
						atomic_inc(&adev->gpu_reset_counter);
						amdgpu_userq_fence_driver_force_completion(queue);
						drm_dev_wedged_event(adev_to_drm(adev),
								     DRM_WEDGE_RECOVERY_NONE, NULL);
					}
				}
			}
		}
	}

	if (found_hung_queue) {
		/* Resume scheduling after hang recovery */
		r = amdgpu_mes_resume(adev);
	}

	return r;
}

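/*
 * mes_userq_mqd_create - build the MQD and firmware objects for a user queue
 *
 * Allocates and initializes the MQD BO from the user-provided properties,
 * validating the IP-specific user MQD (EOP buffer for compute, shadow/CSA
 * for GFX, CSA for SDMA) against the queue's VM before use, then creates
 * the firmware ctx object and the GART mapping for the WPTR.
 */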
static int mes_userq_mqd_create(struct amdgpu_usermode_queue *queue,
				struct drm_amdgpu_userq_in *args_in)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
	struct drm_amdgpu_userq_in *mqd_user = args_in;
	struct amdgpu_mqd_prop *userq_props;
	int r;

	/* Structure to initialize MQD for userqueue using generic MQD init function */
	userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
	if (!userq_props) {
		DRM_ERROR("Failed to allocate memory for userq_props\n");
		return -ENOMEM;
	}

	r = amdgpu_userq_create_object(uq_mgr, &queue->mqd,
				       AMDGPU_MQD_SIZE_ALIGN(mqd_hw_default->mqd_size));
	if (r) {
		DRM_ERROR("Failed to create MQD object for userqueue\n");
		goto free_props;
	}

	/* Initialize the MQD BO with user given values */
	userq_props->wptr_gpu_addr = mqd_user->wptr_va;
	userq_props->rptr_gpu_addr = mqd_user->rptr_va;
	userq_props->queue_size = mqd_user->queue_size;
	userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
	userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
	userq_props->use_doorbell = true;
	userq_props->doorbell_index = queue->doorbell_index;
	userq_props->fence_address = queue->fence_drv->gpu_addr;

	if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
		struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;

		if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
			DRM_ERROR("Invalid compute IP MQD size\n");
			r = -EINVAL;
			goto free_mqd;
		}

		compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(compute_mqd)) {
			DRM_ERROR("Failed to read user MQD\n");
			r = PTR_ERR(compute_mqd);
			goto free_mqd;
		}

		r = amdgpu_bo_reserve(queue->vm->root.bo, false);
		if (r) {
			kfree(compute_mqd);
			goto free_mqd;
		}
		r = amdgpu_userq_input_va_validate(adev, queue, compute_mqd->eop_va,
						   2048);
		amdgpu_bo_unreserve(queue->vm->root.bo);
		if (r) {
			kfree(compute_mqd);
			goto free_mqd;
		}

		userq_props->eop_gpu_addr = compute_mqd->eop_va;
		userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
		userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
		userq_props->hqd_active = false;
		userq_props->tmz_queue =
			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
		kfree(compute_mqd);
	} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
		struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
		struct amdgpu_gfx_shadow_info shadow_info;

		if (adev->gfx.funcs->get_gfx_shadow_info) {
			adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
		} else {
			r = -EINVAL;
			goto free_mqd;
		}

		if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
			DRM_ERROR("Invalid GFX MQD\n");
			r = -EINVAL;
			goto free_mqd;
		}

		mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(mqd_gfx_v11)) {
			DRM_ERROR("Failed to read user MQD\n");
			r = PTR_ERR(mqd_gfx_v11);
			goto free_mqd;
		}

		userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
		userq_props->csa_addr = mqd_gfx_v11->csa_va;
		userq_props->tmz_queue =
			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;

		r = amdgpu_bo_reserve(queue->vm->root.bo, false);
		if (r) {
			kfree(mqd_gfx_v11);
			goto free_mqd;
		}
		r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->shadow_va,
						   shadow_info.shadow_size);
		if (r) {
			amdgpu_bo_unreserve(queue->vm->root.bo);
			kfree(mqd_gfx_v11);
			goto free_mqd;
		}

		r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->csa_va,
						   shadow_info.csa_size);
		amdgpu_bo_unreserve(queue->vm->root.bo);
		if (r) {
			kfree(mqd_gfx_v11);
			goto free_mqd;
		}

		kfree(mqd_gfx_v11);
	} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
		struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;

		if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
			DRM_ERROR("Invalid SDMA MQD\n");
			r = -EINVAL;
			goto free_mqd;
		}

		mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(mqd_sdma_v11)) {
			DRM_ERROR("Failed to read sdma user MQD\n");
			r = PTR_ERR(mqd_sdma_v11);
			goto free_mqd;
		}

		r = amdgpu_bo_reserve(queue->vm->root.bo, false);
		if (r) {
			kfree(mqd_sdma_v11);
			goto free_mqd;
		}
		r = amdgpu_userq_input_va_validate(adev, queue, mqd_sdma_v11->csa_va,
						   32);
		amdgpu_bo_unreserve(queue->vm->root.bo);
		if (r) {
			kfree(mqd_sdma_v11);
			goto free_mqd;
		}

		userq_props->csa_addr = mqd_sdma_v11->csa_va;
		kfree(mqd_sdma_v11);
	}

	queue->userq_prop = userq_props;

	r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
	if (r) {
		DRM_ERROR("Failed to initialize MQD for userqueue\n");
		goto free_mqd;
	}

	/* Create BO for FW operations */
	r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user);
	if (r) {
		DRM_ERROR("Failed to allocate BO for userqueue (%d)\n", r);
		goto free_mqd;
	}

	/* FW expects WPTR BOs to be mapped into GART */
	r = mes_userq_create_wptr_mapping(adev, uq_mgr, queue, userq_props->wptr_gpu_addr);
	if (r) {
		DRM_ERROR("Failed to create WPTR mapping\n");
		goto free_ctx;
	}

	return 0;

free_ctx:
	amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);

free_mqd:
	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);

free_props:
	kfree(userq_props);

	return r;
}

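/*
 * mes_userq_mqd_destroy - tear down the objects created by
 * mes_userq_mqd_create: the firmware ctx object, the queue properties
 * allocation and the MQD BO.
 */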
static void mes_userq_mqd_destroy(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;

	amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
	kfree(queue->userq_prop);
	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
}

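/*
 * mes_userq_preempt - preempt a mapped user queue
 *
 * Issues a MES suspend-gang request with a writeback fence and busy-waits
 * (up to 2100 ms) for the firmware to signal the fence. Returns -ETIMEDOUT
 * if the fence never signals. Queues that are not currently mapped are
 * ignored.
 */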
static int mes_userq_preempt(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_suspend_gang_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	signed long timeout = 2100000; /* 2100 ms */
	u64 fence_gpu_addr;
	u32 fence_offset;
	u64 *fence_ptr;
	int i, r;

	if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
		return 0;

	r = amdgpu_device_wb_get(adev, &fence_offset);
	if (r)
		return r;

	fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4);
	fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
	*fence_ptr = 0;

	memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input));
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
	queue_input.suspend_fence_addr = fence_gpu_addr;
	queue_input.suspend_fence_value = 1;
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		DRM_ERROR("Failed to suspend gang: %d\n", r);
		goto out;
	}

	for (i = 0; i < timeout; i++) {
		if (*fence_ptr == 1)
			goto out;
		udelay(1);
	}
	r = -ETIMEDOUT;

out:
	amdgpu_device_wb_free(adev, fence_offset);
	return r;
}

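/*
 * mes_userq_restore - resume a previously preempted user queue
 *
 * Issues a MES resume-gang request for the queue's gang context. Hung
 * queues cannot be restored (-EINVAL) and queues that were never
 * preempted are ignored.
 */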
static int mes_userq_restore(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_resume_gang_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r;

	if (queue->state == AMDGPU_USERQ_STATE_HUNG)
		return -EINVAL;
	if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED)
		return 0;

	memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input));
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r);
	return r;
}

const struct amdgpu_userq_funcs userq_mes_funcs = {
	.mqd_create = mes_userq_mqd_create,
	.mqd_destroy = mes_userq_mqd_destroy,
	.unmap = mes_userq_unmap,
	.map = mes_userq_map,
	.detect_and_reset = mes_userq_detect_and_reset,
	.preempt = mes_userq_preempt,
	.restore = mes_userq_restore,
};