/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
#include "amdgpu_dev_coredump.h"
#include "amdgpu_xgmi.h"

static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
				    struct amdgpu_job *job)
{
	int i;

	dev_info(adev->dev, "Dumping IP State\n");
	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->funcs->dump_ip_state)
			adev->ip_blocks[i].version->funcs
				->dump_ip_state((void *)&adev->ip_blocks[i]);
	dev_info(adev->dev, "Dumping IP State Completed\n");

	amdgpu_coredump(adev, true, false, job);
}

static void amdgpu_job_core_dump(struct amdgpu_device *adev,
				 struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	struct amdgpu_hive_info *hive = NULL;

	if (!amdgpu_sriov_vf(adev))
		hive = amdgpu_get_xgmi_hive(adev);
	if (hive)
		mutex_lock(&hive->hive_lock);
	/*
	 * Reuse the logic in amdgpu_device_gpu_recover() to build the list of
	 * devices for the core dump
	 */
	INIT_LIST_HEAD(&device_list);
	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* Do the coredump for each device */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list)
		amdgpu_job_do_core_dump(tmp_adev, job);

	if (hive) {
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}
}

static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct drm_wedge_task_info *info = NULL;
	struct amdgpu_task_info *ti = NULL;
	struct amdgpu_device *adev = ring->adev;
	int idx, r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	/*
	 * Do the coredump immediately after a job timeout to get a very
	 * close dump/snapshot/representation of the GPU's current error status.
	 * Skip it for SRIOV, since VF FLR will be triggered by the host driver
	 * before the job timeout.
	 */
	if (!amdgpu_sriov_vf(adev))
		amdgpu_job_core_dump(adev, job);

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
			s_job->sched->name);
		goto exit;
	}

	dev_err(adev->dev, "ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		ring->fence_drv.sync_seq);

	ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
	if (ti) {
		amdgpu_vm_print_task_info(adev, ti);
		info = &ti->task;
	}

	/* attempt a per ring reset */
	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
	    ring->funcs->reset) {
		dev_err(adev->dev, "Starting %s ring reset\n",
			s_job->sched->name);
		/* Stop the scheduler to prevent anybody else from touching the ring buffer. */
		drm_sched_wqueue_stop(&ring->sched);
		r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence);
		if (!r) {
			/* Start the scheduler again */
			drm_sched_wqueue_start(&ring->sched);
			atomic_inc(&ring->adev->gpu_reset_counter);
			dev_err(adev->dev, "Ring %s reset succeeded\n",
				ring->sched.name);
			drm_dev_wedged_event(adev_to_drm(adev),
					     DRM_WEDGE_RECOVERY_NONE, info);
			goto exit;
		}
		dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
	}

	if (dma_fence_get_status(&s_job->s_fence->finished) == 0)
		dma_fence_set_error(&s_job->s_fence->finished, -ETIME);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		struct amdgpu_reset_context reset_context;
		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		reset_context.src = AMDGPU_RESET_SRC_JOB;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		/*
		 * To avoid an unnecessary extra coredump, as we have already
		 * got the very close representation of the GPU's error status
		 */
		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
		if (r)
			dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	amdgpu_vm_put_task_info(ti);
	drm_dev_exit(idx);
	/* This is needed to add the job back to the pending list */
	return DRM_GPU_SCHED_STAT_NO_HANG;
}

int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct drm_sched_entity *entity, void *owner,
		     unsigned int num_ibs, struct amdgpu_job **job,
		     u64 drm_client_id)
{
	struct amdgpu_fence *af;
	int r;

	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	af = kzalloc(sizeof(*af), GFP_KERNEL);
	if (!af) {
		r = -ENOMEM;
		goto err_job;
	}
	(*job)->hw_fence = af;

	af = kzalloc(sizeof(*af), GFP_KERNEL);
	if (!af) {
		r = -ENOMEM;
		goto err_fence;
	}
	(*job)->hw_vm_fence = af;

	(*job)->vm = vm;

	amdgpu_sync_create(&(*job)->explicit_sync);
	(*job)->generation = amdgpu_vm_generation(adev, vm);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	if (!entity)
		return 0;

	r = drm_sched_job_init(&(*job)->base, entity, 1, owner, drm_client_id);
	if (!r)
		return 0;

	kfree((*job)->hw_vm_fence);

err_fence:
	kfree((*job)->hw_fence);
err_job:
	kfree(*job);
	*job = NULL;

	return r;
}

int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
			     struct drm_sched_entity *entity, void *owner,
			     size_t size, enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job, u64 k_job_id)
{
	int r;

	r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
			     k_job_id);
	if (r)
		return r;

	(*job)->num_ibs = 1;
	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r) {
		if (entity)
			drm_sched_job_cleanup(&(*job)->base);
		kfree((*job)->hw_vm_fence);
		kfree((*job)->hw_fence);
		kfree(*job);
		*job = NULL;
	}

	return r;
}

void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
			      struct amdgpu_bo *gws, struct amdgpu_bo *oa)
{
	if (gds) {
		job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}
}

void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct dma_fence *f;
	unsigned i;

	/* Check if any fences were initialized */
	if (job->base.s_fence &&
	    dma_fence_was_initialized(&job->base.s_fence->finished))
		f = &job->base.s_fence->finished;
	else if (dma_fence_was_initialized(&job->hw_fence->base))
		f = &job->hw_fence->base;
	else
		f = NULL;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(&job->ibs[i], f);
}

static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->explicit_sync);

	if (dma_fence_was_initialized(&job->hw_fence->base))
		dma_fence_put(&job->hw_fence->base);
	else
		kfree(job->hw_fence);
	if (dma_fence_was_initialized(&job->hw_vm_fence->base))
		dma_fence_put(&job->hw_vm_fence->base);
	else
		kfree(job->hw_vm_fence);

	kfree(job);
}

void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
				struct amdgpu_job *leader)
{
	struct dma_fence *fence = &leader->base.s_fence->scheduled;

	WARN_ON(job->gang_submit);

	/*
	 * Don't add a reference when we are the gang leader to avoid a circular
	 * dependency.
	 */
	if (job != leader)
		dma_fence_get(fence);
	job->gang_submit = fence;
}

void amdgpu_job_free(struct amdgpu_job *job)
{
	if (job->base.entity)
		drm_sched_job_cleanup(&job->base);

	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->explicit_sync);
	if (job->gang_submit != &job->base.s_fence->scheduled)
		dma_fence_put(job->gang_submit);

	if (dma_fence_was_initialized(&job->hw_fence->base))
		dma_fence_put(&job->hw_fence->base);
	else
		kfree(job->hw_fence);
	if (dma_fence_was_initialized(&job->hw_vm_fence->base))
		dma_fence_put(&job->hw_vm_fence->base);
	else
		kfree(job->hw_vm_fence);

	kfree(job);
}

struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
{
	struct dma_fence *f;

	drm_sched_job_arm(&job->base);
	f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return f;
}

int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);

	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

static struct dma_fence *
amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
		       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct dma_fence *fence;
	int r;

	r = drm_sched_entity_error(s_entity);
	if (r)
		goto error;

	if (job->gang_submit) {
		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
		if (fence)
			return fence;
	}

	fence = amdgpu_device_enforce_isolation(ring->adev, ring, job);
	if (fence)
		return fence;

	if (job->vm && !job->vmid) {
		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
		if (r) {
			dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
			goto error;
		}
		return fence;
	}

	return NULL;

error:
	dma_fence_set_error(&job->base.s_fence->finished, r);
	return NULL;
}

static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	trace_amdgpu_sched_run_job(job);

	/* Skip job if VRAM is lost and never resubmit gangs */
	if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
	    (job->job_run_counter && job->gang_submit))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
			ring->name);
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			dev_err(adev->dev,
				"Error scheduling IBs (%d) in ring(%s)", r,
				ring->name);
	}

	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

/*
 * This is a duplicate of a function from the DRM scheduler's sched_internal.h.
 * The plan is to remove it when amdgpu_job_stop_all_jobs_on_sched is removed,
 * due to the latter being incorrect and racy.
 *
 * See https://lore.kernel.org/amd-gfx/44edde63-7181-44fb-a4f7-94e50514f539@amd.com/
 */
static struct drm_sched_job *
drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
{
	struct spsc_node *node;

	node = spsc_queue_pop(&entity->job_queue);
	if (!node)
		return NULL;

	return container_of(node, struct drm_sched_job, queue_node);
}

void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		struct drm_sched_rq *rq = sched->sched_rq[i];
		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = drm_sched_entity_queue_pop(s_entity))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_job_prepare_job,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};