// SPDX-License-Identifier: MIT
/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "mes_userqueue.h"
#include "amdgpu_userq_fence.h"

#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE

static int
mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
{
	int ret;

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
		goto err_reserve_bo_failed;
	}

	ret = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (ret) {
		DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
		goto err_map_bo_gart_failed;
	}

	amdgpu_bo_unreserve(bo);
	bo = amdgpu_bo_ref(bo);

	return 0;

err_map_bo_gart_failed:
	amdgpu_bo_unreserve(bo);
err_reserve_bo_failed:
	return ret;
}

static int
mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr,
			      struct amdgpu_usermode_queue *queue,
			      uint64_t wptr)
{
	struct amdgpu_bo_va_mapping *wptr_mapping;
	struct amdgpu_vm *wptr_vm;
	struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
	int ret;

	wptr_vm = queue->vm;
	ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
	if (ret)
		return ret;

	wptr &= AMDGPU_GMC_HOLE_MASK;
	wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
	amdgpu_bo_unreserve(wptr_vm->root.bo);
	if (!wptr_mapping) {
		DRM_ERROR("Failed to lookup wptr bo\n");
		return -EINVAL;
	}

	wptr_obj->obj = wptr_mapping->bo_va->base.bo;
	if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
		DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
		return -EINVAL;
	}

	ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
	if (ret) {
		DRM_ERROR("Failed to map wptr bo to GART\n");
		return ret;
	}

	queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset_no_check(wptr_obj->obj);
	return 0;
}

static int convert_to_mes_priority(int priority)
{
	switch (priority) {
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW:
	default:
		return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW:
		return AMDGPU_MES_PRIORITY_LEVEL_LOW;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH:
		return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM;
	case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH:
		return AMDGPU_MES_PRIORITY_LEVEL_HIGH;
	}
}

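/*
 * Map a user mode queue onto the hardware through the MES firmware.
 * The process and gang context addresses come from the per-queue
 * fw_obj allocated in mes_userq_create_ctx_space(); queue location,
 * size, doorbell and wptr details come from the MQD properties built
 * at queue creation time.
 */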
static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr,
			 struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
	struct mes_add_queue_input queue_input;
	int r;

	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));

	queue_input.process_va_start = 0;
	queue_input.process_va_end = adev->vm_manager.max_pfn - 1;

	/* set process quantum to 10 ms and gang quantum to 1 ms as default */
	queue_input.process_quantum = 100000;
	queue_input.gang_quantum = 10000;
	queue_input.paging = false;

	queue_input.process_context_addr = ctx->gpu_addr;
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
	queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority);

	queue_input.process_id = queue->vm->pasid;
	queue_input.queue_type = queue->queue_type;
	queue_input.mqd_addr = queue->mqd.gpu_addr;
	queue_input.wptr_addr = userq_props->wptr_gpu_addr;
	queue_input.queue_size = userq_props->queue_size >> 2;
	queue_input.doorbell_offset = userq_props->doorbell_index;
	queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
	queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
		return r;
	}

	DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
	return 0;
}

static int mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr,
			   struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct mes_remove_queue_input queue_input;
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r;

	memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
	queue_input.doorbell_offset = queue->doorbell_index;
	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
	return r;
}

static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
				      struct amdgpu_usermode_queue *queue,
				      struct drm_amdgpu_userq_in *mqd_user)
{
	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
	int r, size;

	/*
	 * The FW expects at least one page space allocated for
	 * process ctx and gang ctx each. Create an object
	 * for the same.
	 */
	size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
	r = amdgpu_userq_create_object(uq_mgr, ctx, size);
	if (r) {
		DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
		return r;
	}

	return 0;
}

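/*
 * Ask the MES firmware to detect and reset hung queues of the given
 * type. Any user queue whose doorbell shows up in the returned list is
 * marked hung, its fences are force-completed and a device wedged
 * event is emitted to notify user space.
 */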
static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
				      int queue_type)
{
	int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
	struct mes_detect_and_reset_queue_input input;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	unsigned int hung_db_num = 0;
	int queue_id, r, i;
	u32 db_array[4];

	if (db_array_size > 4) {
		dev_err(adev->dev, "DB array size (%d vs 4) too small\n",
			db_array_size);
		return -EINVAL;
	}

	memset(&input, 0x0, sizeof(struct mes_detect_and_reset_queue_input));

	input.queue_type = queue_type;

	amdgpu_mes_lock(&adev->mes);
	r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false,
						    &hung_db_num, db_array);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
	} else if (hung_db_num) {
		list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
			idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
				if (queue->queue_type == queue_type) {
					for (i = 0; i < hung_db_num; i++) {
						if (queue->doorbell_index == db_array[i]) {
							queue->state = AMDGPU_USERQ_STATE_HUNG;
							atomic_inc(&adev->gpu_reset_counter);
							amdgpu_userq_fence_driver_force_completion(queue);
							drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
						}
					}
				}
			}
		}
	}

	return r;
}

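/*
 * Create the MQD (memory queue descriptor) for a new user queue:
 * allocate the MQD BO, copy the IP specific metadata (EOP/shadow/CSA
 * buffers etc.) from user space, initialize the MQD through the IP's
 * init_mqd callback, then allocate the FW context space and GART-map
 * the WPTR BO.
 */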
static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
				struct drm_amdgpu_userq_in *args_in,
				struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
	struct drm_amdgpu_userq_in *mqd_user = args_in;
	struct amdgpu_mqd_prop *userq_props;
	int r;

	/* Structure to initialize MQD for userqueue using generic MQD init function */
	userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
	if (!userq_props) {
		DRM_ERROR("Failed to allocate memory for userq_props\n");
		return -ENOMEM;
	}

	r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
	if (r) {
		DRM_ERROR("Failed to create MQD object for userqueue\n");
		goto free_props;
	}

	/* Initialize the MQD BO with user given values */
	userq_props->wptr_gpu_addr = mqd_user->wptr_va;
	userq_props->rptr_gpu_addr = mqd_user->rptr_va;
	userq_props->queue_size = mqd_user->queue_size;
	userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
	userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
	userq_props->use_doorbell = true;
	userq_props->doorbell_index = queue->doorbell_index;
	userq_props->fence_address = queue->fence_drv->gpu_addr;

	if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
		struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;

		if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
			DRM_ERROR("Invalid compute IP MQD size\n");
			r = -EINVAL;
			goto free_mqd;
		}

		compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(compute_mqd)) {
			DRM_ERROR("Failed to read user MQD\n");
			r = -ENOMEM;
			goto free_mqd;
		}

		userq_props->eop_gpu_addr = compute_mqd->eop_va;
		userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
		userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
		userq_props->hqd_active = false;
		userq_props->tmz_queue =
			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
		kfree(compute_mqd);
	} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
		struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;

		if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
			DRM_ERROR("Invalid GFX MQD\n");
			r = -EINVAL;
			goto free_mqd;
		}

		mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(mqd_gfx_v11)) {
			DRM_ERROR("Failed to read user MQD\n");
			r = -ENOMEM;
			goto free_mqd;
		}

		userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
		userq_props->csa_addr = mqd_gfx_v11->csa_va;
		userq_props->tmz_queue =
			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
		kfree(mqd_gfx_v11);
	} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
		struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;

		if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
			DRM_ERROR("Invalid SDMA MQD\n");
			r = -EINVAL;
			goto free_mqd;
		}

		mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
		if (IS_ERR(mqd_sdma_v11)) {
			DRM_ERROR("Failed to read sdma user MQD\n");
			r = -ENOMEM;
			goto free_mqd;
		}

		userq_props->csa_addr = mqd_sdma_v11->csa_va;
		kfree(mqd_sdma_v11);
	}

	queue->userq_prop = userq_props;

	r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
	if (r) {
		DRM_ERROR("Failed to initialize MQD for userqueue\n");
		goto free_mqd;
	}

	/* Create BO for FW operations */
	r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user);
	if (r) {
		DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
		goto free_mqd;
	}

	/* FW expects WPTR BOs to be mapped into GART */
	r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr);
	if (r) {
		DRM_ERROR("Failed to create WPTR mapping\n");
		goto free_ctx;
	}

	return 0;

free_ctx:
	amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);

free_mqd:
	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);

free_props:
	kfree(userq_props);

	return r;
}

static void
mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
		      struct amdgpu_usermode_queue *queue)
{
	amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
	kfree(queue->userq_prop);
	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
}

const struct amdgpu_userq_funcs userq_mes_funcs = {
	.mqd_create = mes_userq_mqd_create,
	.mqd_destroy = mes_userq_mqd_destroy,
	.unmap = mes_userq_unmap,
	.map = mes_userq_map,
	.detect_and_reset = mes_userq_detect_and_reset,
};