/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
#include "amdgpu_dev_coredump.h"
#include "amdgpu_xgmi.h"

static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
				    struct amdgpu_job *job)
{
	int i;

	dev_info(adev->dev, "Dumping IP State\n");
	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->funcs->dump_ip_state)
			adev->ip_blocks[i].version->funcs
				->dump_ip_state((void *)&adev->ip_blocks[i]);
	dev_info(adev->dev, "Dumping IP State Completed\n");

	amdgpu_coredump(adev, true, false, job);
}

static void amdgpu_job_core_dump(struct amdgpu_device *adev,
				 struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	struct amdgpu_hive_info *hive = NULL;

	if (!amdgpu_sriov_vf(adev))
		hive = amdgpu_get_xgmi_hive(adev);
	if (hive)
		mutex_lock(&hive->hive_lock);
	/*
	 * Reuse the logic in amdgpu_device_gpu_recover() to build the list of
	 * devices for the coredump.
	 */
	INIT_LIST_HEAD(&device_list);
	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* Do the coredump for each device */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list)
		amdgpu_job_do_core_dump(tmp_adev, job);

	if (hive) {
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}
}

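/*
 * amdgpu_job_timedout - scheduler callback for a job that missed its deadline
 *
 * Summary of the recovery flow implemented below, in order of increasing
 * impact: take a coredump (bare metal only), try soft recovery of the hung
 * job, try a per-ring reset, and finally fall back to a full GPU reset via
 * amdgpu_device_gpu_recover().  On SRIOV VFs the host driver triggers FLR,
 * so the coredump is skipped.
 */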
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info *ti;
	struct amdgpu_device *adev = ring->adev;
	int idx;
	int r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		dev_info(adev->dev, "%s - device unplugged, skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	adev->job_hang = true;

	/*
	 * Do the coredump immediately after a job timeout to get a very
	 * close dump/snapshot/representation of the GPU's current error state.
	 * Skip it for SRIOV, since VF FLR will be triggered by the host driver
	 * before the job timeout.
	 */
	if (!amdgpu_sriov_vf(adev))
		amdgpu_job_core_dump(adev, job);

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
			s_job->sched->name);
		goto exit;
	}

	dev_err(adev->dev, "ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		ring->fence_drv.sync_seq);

	ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
	if (ti) {
		dev_err(adev->dev,
			"Process information: process %s pid %d thread %s pid %d\n",
			ti->process_name, ti->tgid, ti->task_name, ti->pid);
		amdgpu_vm_put_task_info(ti);
	}

	dma_fence_set_error(&s_job->s_fence->finished, -ETIME);

	/* attempt a per-ring reset */
	if (amdgpu_gpu_recovery &&
	    ring->funcs->reset) {
		dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name);
		/*
		 * Stop the scheduler, but don't mess with the bad job yet
		 * because if the ring reset fails we'll fall back to a full
		 * GPU reset.
		 */
		drm_sched_wqueue_stop(&ring->sched);
		r = amdgpu_ring_reset(ring, job->vmid);
		if (!r) {
			if (amdgpu_ring_sched_ready(ring))
				drm_sched_stop(&ring->sched, s_job);
			atomic_inc(&ring->adev->gpu_reset_counter);
			amdgpu_fence_driver_force_completion(ring);
			if (amdgpu_ring_sched_ready(ring))
				drm_sched_start(&ring->sched, 0);
			goto exit;
		}
		dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name);
	}

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		reset_context.src = AMDGPU_RESET_SRC_JOB;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		/*
		 * Skip the coredump during recovery; we already captured a
		 * very close representation of the GPU's error status right
		 * after the timeout above.
		 */
		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
		if (r)
			dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	adev->job_hang = false;
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct drm_sched_entity *entity, void *owner,
		     unsigned int num_ibs, struct amdgpu_job **job)
{
	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	(*job)->vm = vm;

	amdgpu_sync_create(&(*job)->explicit_sync);
	(*job)->generation = amdgpu_vm_generation(adev, vm);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	if (!entity)
		return 0;

	return drm_sched_job_init(&(*job)->base, entity, 1, owner);
}

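/*
 * amdgpu_job_alloc_with_ib - allocate a job carrying a single IB
 *
 * Convenience wrapper around amdgpu_job_alloc() for kernel-internal
 * submissions that need exactly one indirect buffer.  Illustrative call
 * sequence (a sketch only, loosely modeled on internal users such as the
 * TTM buffer-move path; the entity, owner and size are up to the caller):
 *
 *	r = amdgpu_job_alloc_with_ib(adev, &entity, AMDGPU_FENCE_OWNER_UNDEFINED,
 *				     num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
 *	if (r)
 *		return r;
 *	... emit packets into job->ibs[0] ...
 *	fence = amdgpu_job_submit(job);
 */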
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
			     struct drm_sched_entity *entity, void *owner,
			     size_t size, enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
	if (r)
		return r;

	(*job)->num_ibs = 1;
	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r) {
		if (entity)
			drm_sched_job_cleanup(&(*job)->base);
		kfree(*job);
	}

	return r;
}

void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
			      struct amdgpu_bo *gws, struct amdgpu_bo *oa)
{
	if (gds) {
		job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}
}

void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct dma_fence *f;
	unsigned int i;

	/* Check if any fences were initialized */
	if (job->base.s_fence && job->base.s_fence->finished.ops)
		f = &job->base.s_fence->finished;
	else if (job->hw_fence.ops)
		f = &job->hw_fence;
	else
		f = NULL;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(&job->ibs[i], f);
}

static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->explicit_sync);

	/* only put the hw fence if the job has an embedded fence */
	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}

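/*
 * amdgpu_job_set_gang_leader - tie a job to its gang leader
 *
 * Stores the leader's scheduled fence in the job so that
 * amdgpu_job_prepare_job() can switch the hardware to this gang before any
 * member of it runs.  The leader itself does not take an extra reference on
 * the fence, see the comment below.
 */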
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
				struct amdgpu_job *leader)
{
	struct dma_fence *fence = &leader->base.s_fence->scheduled;

	WARN_ON(job->gang_submit);

	/*
	 * Don't add a reference when we are the gang leader to avoid a
	 * circular dependency.
	 */
	if (job != leader)
		dma_fence_get(fence);
	job->gang_submit = fence;
}

void amdgpu_job_free(struct amdgpu_job *job)
{
	if (job->base.entity)
		drm_sched_job_cleanup(&job->base);

	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->explicit_sync);
	if (job->gang_submit != &job->base.s_fence->scheduled)
		dma_fence_put(job->gang_submit);

	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}

struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
{
	struct dma_fence *f;

	drm_sched_job_arm(&job->base);
	f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return f;
}

int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);

	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

static struct dma_fence *
amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
		       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct dma_fence *fence = NULL;
	int r;

	r = drm_sched_entity_error(s_entity);
	if (r)
		goto error;

	if (job->gang_submit)
		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);

	if (!fence && job->vm && !job->vmid) {
		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
		if (r) {
			dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
			goto error;
		}
	}

	return fence;

error:
	dma_fence_set_error(&job->base.s_fence->finished, r);
	return NULL;
}

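/*
 * amdgpu_job_run - scheduler callback that pushes the job's IBs to the ring
 *
 * The job is cancelled with -ECANCELED instead of being run if the VM
 * generation changed (e.g. VRAM was lost since the job was created) or if a
 * gang job is being resubmitted after a reset, since gangs must never be
 * resubmitted.
 */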
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	trace_amdgpu_sched_run_job(job);

	/* Skip job if VRAM is lost and never resubmit gangs */
	if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
	    (job->job_run_counter && job->gang_submit))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
			ring->name);
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			dev_err(adev->dev,
				"Error scheduling IBs (%d) in ring(%s)", r,
				ring->name);
	}

	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		struct drm_sched_rq *rq = sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_job_prepare_job,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};