// SPDX-License-Identifier: MIT
/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "mes_userqueue.h"
#include "amdgpu_userq_fence.h"

#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE

static int
mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
{
	int ret;

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
		goto err_reserve_bo_failed;
	}

	ret = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (ret) {
		DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
		goto err_map_bo_gart_failed;
	}

	amdgpu_bo_unreserve(bo);
	bo = amdgpu_bo_ref(bo);

	return 0;

err_map_bo_gart_failed:
	amdgpu_bo_unreserve(bo);
err_reserve_bo_failed:
	return ret;
}

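/*
 * Resolve the user-supplied WPTR VA to its backing BO in the queue's VM,
 * bind that BO into GART and pin it, so the MES firmware can read the
 * write pointer through a fixed MC address (queue->wptr_obj.gpu_addr,
 * later passed to MES as wptr_mc_addr). The backing BO may not be larger
 * than a single page.
 */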
static int
mes_userq_create_wptr_mapping(struct amdgpu_device *adev,
			      struct amdgpu_userq_mgr *uq_mgr,
			      struct amdgpu_usermode_queue *queue,
			      uint64_t wptr)
{
	struct amdgpu_bo_va_mapping *wptr_mapping;
	struct amdgpu_vm *wptr_vm;
	struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
	int ret;

	wptr_vm = queue->vm;
	ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
	if (ret)
		return ret;

	wptr &= AMDGPU_GMC_HOLE_MASK;
	wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
	amdgpu_bo_unreserve(wptr_vm->root.bo);
	if (!wptr_mapping) {
		DRM_ERROR("Failed to lookup wptr bo\n");
		return -EINVAL;
	}

	wptr_obj->obj = wptr_mapping->bo_va->base.bo;
	if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
		DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
		return -EINVAL;
	}

	ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
	if (ret) {
		DRM_ERROR("Failed to map wptr bo to GART\n");
		return ret;
	}

	ret = amdgpu_bo_reserve(wptr_obj->obj, true);
	if (ret) {
		DRM_ERROR("Failed to reserve wptr bo\n");
		return ret;
	}

	/* TODO use eviction fence instead of pinning. */
	ret = amdgpu_bo_pin(wptr_obj->obj, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin wptr bo\n");
		goto unresv_bo;
	}

	queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset(wptr_obj->obj);
	amdgpu_bo_unreserve(wptr_obj->obj);

	return 0;

unresv_bo:
	amdgpu_bo_unreserve(wptr_obj->obj);
	return ret;
}

static int convert_to_mes_priority(int priority)
{
	switch (priority) {
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW:
	default:
		return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW:
		return AMDGPU_MES_PRIORITY_LEVEL_LOW;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH:
		return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH:
		return AMDGPU_MES_PRIORITY_LEVEL_HIGH;
	}
}

static int mes_userq_map(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
	struct mes_add_queue_input queue_input;
	int r;

	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));

	queue_input.process_va_start = 0;
	queue_input.process_va_end = adev->vm_manager.max_pfn - 1;

	/* set process quantum to 10 ms and gang quantum to 1 ms as default */
	queue_input.process_quantum = 100000;
	queue_input.gang_quantum = 10000;
	queue_input.paging = false;

	queue_input.process_context_addr = ctx->gpu_addr;
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
	queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority);

	queue_input.process_id = queue->vm->pasid;
	queue_input.queue_type = queue->queue_type;
	queue_input.mqd_addr = queue->mqd.gpu_addr;
	queue_input.wptr_addr = userq_props->wptr_gpu_addr;
	queue_input.queue_size = userq_props->queue_size >> 2;
	queue_input.doorbell_offset = userq_props->doorbell_index;
	queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
	queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
		return r;
	}

	DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
	return 0;
}

static int mes_userq_unmap(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_remove_queue_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r;

	memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
	queue_input.doorbell_offset = queue->doorbell_index;
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
	return r;
}

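/*
 * The firmware object allocated below packs the per-process context and the
 * gang context back to back in one BO: the process context starts at
 * queue->fw_obj.gpu_addr and the gang context follows at
 * queue->fw_obj.gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ, the offset used by the
 * map/unmap/preempt/restore paths in this file.
 */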
static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
				      struct amdgpu_usermode_queue *queue,
				      struct drm_amdgpu_userq_in *mqd_user)
{
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r, size;

	/*
	 * The FW expects at least one page space allocated for
	 * process ctx and gang ctx each. Create an object
	 * for the same.
	 */
	size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
	r = amdgpu_userq_create_object(uq_mgr, ctx, size);
	if (r) {
		DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
		return r;
	}

	return 0;
}

static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
				      int queue_type)
{
	int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
	struct mes_detect_and_reset_queue_input input;
	struct amdgpu_usermode_queue *queue;
	unsigned int hung_db_num = 0;
	unsigned long queue_id;
	u32 db_array[8];
	bool found_hung_queue = false;
	int r, i;

	if (db_array_size > 8) {
		dev_err(adev->dev, "DB array size (%d vs 8) too small\n",
			db_array_size);
		return -EINVAL;
	}

	memset(&input, 0x0, sizeof(struct mes_detect_and_reset_queue_input));

	input.queue_type = queue_type;

	amdgpu_mes_lock(&adev->mes);
	r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false,
						    &hung_db_num, db_array, 0);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
	} else if (hung_db_num) {
		xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
			if (queue->queue_type == queue_type) {
				for (i = 0; i < hung_db_num; i++) {
					if (queue->doorbell_index == db_array[i]) {
						queue->state = AMDGPU_USERQ_STATE_HUNG;
						found_hung_queue = true;
						atomic_inc(&adev->gpu_reset_counter);
						amdgpu_userq_fence_driver_force_completion(queue);
						drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
					}
				}
			}
		}
	}

	if (found_hung_queue) {
		/* Resume scheduling after hang recovery */
		r = amdgpu_mes_resume(adev);
	}

	return r;
}

static int mes_userq_mqd_create(struct amdgpu_usermode_queue *queue,
				struct drm_amdgpu_userq_in *args_in)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
	struct drm_amdgpu_userq_in *mqd_user = args_in;
	struct amdgpu_mqd_prop *userq_props;
	int r;

	/* Structure to initialize MQD for userqueue using generic MQD init function */
	userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
	if (!userq_props) {
		DRM_ERROR("Failed to allocate memory for userq_props\n");
		return -ENOMEM;
	}

	r = amdgpu_userq_create_object(uq_mgr, &queue->mqd,
				       AMDGPU_MQD_SIZE_ALIGN(mqd_hw_default->mqd_size));
	if (r) {
		DRM_ERROR("Failed to create MQD object for userqueue\n");
		goto free_props;
	}

	/* Initialize the MQD BO with user given values */
	userq_props->wptr_gpu_addr = mqd_user->wptr_va;
	userq_props->rptr_gpu_addr = mqd_user->rptr_va;
	userq_props->queue_size = mqd_user->queue_size;
	userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
	userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
	userq_props->use_doorbell = true;
	userq_props->doorbell_index = queue->doorbell_index;
	userq_props->fence_address = queue->fence_drv->gpu_addr;

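	/*
	 * The remaining properties are IP specific. Userspace passes them in a
	 * small per-IP MQD struct (drm_amdgpu_userq_mqd_*_gfx11); each branch
	 * below size-checks that struct, copies it in with memdup_user(),
	 * validates the GPU virtual addresses it references and then fills in
	 * the matching amdgpu_mqd_prop fields.
	 */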
	if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
		struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;

		if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
			DRM_ERROR("Invalid compute IP MQD size\n");
			r = -EINVAL;
			goto free_mqd;
		}

		compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(compute_mqd)) {
			DRM_ERROR("Failed to read user MQD\n");
			r = -ENOMEM;
			goto free_mqd;
		}

		r = amdgpu_userq_input_va_validate(adev, queue, compute_mqd->eop_va,
						   2048);
		if (r) {
			kfree(compute_mqd);
			goto free_mqd;
		}

		userq_props->eop_gpu_addr = compute_mqd->eop_va;
		userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
		userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
		userq_props->hqd_active = false;
		userq_props->tmz_queue =
			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
		kfree(compute_mqd);
	} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
		struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
		struct amdgpu_gfx_shadow_info shadow_info;

		if (adev->gfx.funcs->get_gfx_shadow_info) {
			adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
		} else {
			r = -EINVAL;
			goto free_mqd;
		}

		if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
			DRM_ERROR("Invalid GFX MQD\n");
			r = -EINVAL;
			goto free_mqd;
		}

		mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(mqd_gfx_v11)) {
			DRM_ERROR("Failed to read user MQD\n");
			r = -ENOMEM;
			goto free_mqd;
		}

		userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
		userq_props->csa_addr = mqd_gfx_v11->csa_va;
		userq_props->tmz_queue =
			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;

		r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->shadow_va,
						   shadow_info.shadow_size);
		if (r) {
			kfree(mqd_gfx_v11);
			goto free_mqd;
		}
		r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->csa_va,
						   shadow_info.csa_size);
		if (r) {
			kfree(mqd_gfx_v11);
			goto free_mqd;
		}

		kfree(mqd_gfx_v11);
	} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
		struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;

		if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
			DRM_ERROR("Invalid SDMA MQD\n");
			r = -EINVAL;
			goto free_mqd;
		}

		mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(mqd_sdma_v11)) {
			DRM_ERROR("Failed to read sdma user MQD\n");
			r = -ENOMEM;
			goto free_mqd;
		}

		r = amdgpu_userq_input_va_validate(adev, queue, mqd_sdma_v11->csa_va,
						   32);
		if (r) {
			kfree(mqd_sdma_v11);
			goto free_mqd;
		}

		userq_props->csa_addr = mqd_sdma_v11->csa_va;
		kfree(mqd_sdma_v11);
	}

	queue->userq_prop = userq_props;

	r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
	if (r) {
		DRM_ERROR("Failed to initialize MQD for userqueue\n");
		goto free_mqd;
	}

	/* Create BO for FW operations */
	r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user);
	if (r) {
		DRM_ERROR("Failed to allocate BO for userqueue (%d)\n", r);
		goto free_mqd;
	}

	/* FW expects WPTR BOs to be mapped into GART */
	r = mes_userq_create_wptr_mapping(adev, uq_mgr, queue, userq_props->wptr_gpu_addr);
	if (r) {
		DRM_ERROR("Failed to create WPTR mapping\n");
		goto free_ctx;
	}

	return 0;

free_ctx:
	amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);

free_mqd:
	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);

free_props:
	kfree(userq_props);

	return r;
}

static void mes_userq_mqd_destroy(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;

	amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
	kfree(queue->userq_prop);
	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
}

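/*
 * Preemption is implemented as a MES suspend_gang request. A device
 * writeback slot is used as the completion fence: MES is asked to write
 * suspend_fence_value (1) to suspend_fence_addr once the gang is suspended,
 * and the CPU polls that slot for up to roughly 2.1 seconds before giving
 * up with -ETIMEDOUT.
 */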
static int mes_userq_preempt(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_suspend_gang_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	signed long timeout = 2100000; /* 2100 ms */
	u64 fence_gpu_addr;
	u32 fence_offset;
	u64 *fence_ptr;
	int i, r;

	if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
		return 0;

	r = amdgpu_device_wb_get(adev, &fence_offset);
	if (r)
		return r;

	fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4);
	fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
	*fence_ptr = 0;

	memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input));
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
	queue_input.suspend_fence_addr = fence_gpu_addr;
	queue_input.suspend_fence_value = 1;
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		DRM_ERROR("Failed to suspend gang: %d\n", r);
		goto out;
	}

	for (i = 0; i < timeout; i++) {
		if (*fence_ptr == 1)
			goto out;
		udelay(1);
	}
	r = -ETIMEDOUT;

out:
	amdgpu_device_wb_free(adev, fence_offset);
	return r;
}

static int mes_userq_restore(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_resume_gang_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r;

	if (queue->state == AMDGPU_USERQ_STATE_HUNG)
		return -EINVAL;
	if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED)
		return 0;

	memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input));
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r);
	return r;
}

const struct amdgpu_userq_funcs userq_mes_funcs = {
	.mqd_create = mes_userq_mqd_create,
	.mqd_destroy = mes_userq_mqd_destroy,
	.unmap = mes_userq_unmap,
	.map = mes_userq_map,
	.detect_and_reset = mes_userq_detect_and_reset,
	.preempt = mes_userq_preempt,
	.restore = mes_userq_restore,
};