/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
#include "amdgpu_dev_coredump.h"
#include "amdgpu_xgmi.h"

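/*
 * Dump the IP state of every IP block that implements dump_ip_state and then
 * capture a device coredump for @adev, annotated with @job.
 */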
static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
				    struct amdgpu_job *job)
{
	int i;

	dev_info(adev->dev, "Dumping IP State\n");
	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->funcs->dump_ip_state)
			adev->ip_blocks[i].version->funcs
				->dump_ip_state((void *)&adev->ip_blocks[i]);
	dev_info(adev->dev, "Dumping IP State Completed\n");

	amdgpu_coredump(adev, true, false, job);
}

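/*
 * Core dump the device that owns @job. On bare-metal XGMI configurations
 * every device in the hive is dumped, with @adev rotated to the front of the
 * list so it is dumped first.
 */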
static void amdgpu_job_core_dump(struct amdgpu_device *adev,
				 struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	struct amdgpu_hive_info *hive = NULL;

	if (!amdgpu_sriov_vf(adev))
		hive = amdgpu_get_xgmi_hive(adev);
	if (hive)
		mutex_lock(&hive->hive_lock);
	/*
	 * Reuse the logic in amdgpu_device_gpu_recover() to build the list of
	 * devices to coredump.
	 */
	INIT_LIST_HEAD(&device_list);
	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* Do the coredump for each device */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list)
		amdgpu_job_do_core_dump(tmp_adev, job);

	if (hive) {
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}
}

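/*
 * Scheduler timedout_job callback. Capture a coredump of the current error
 * state, then try progressively heavier recovery: soft ring recovery, a
 * per-queue ring reset and finally a full GPU reset, returning the scheduler
 * status that matches the action taken.
 */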
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct drm_wedge_task_info *info = NULL;
	struct amdgpu_task_info *ti = NULL;
	struct amdgpu_device *adev = ring->adev;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET;
	int idx, r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		dev_info(adev->dev, "%s - device unplugged, skipping recovery on scheduler: %s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	/*
	 * Do the coredump immediately after a job timeout to get a very
	 * close dump/snapshot/representation of the GPU's current error status.
	 * Skip it for SRIOV, since a VF FLR will be triggered by the host
	 * driver before the job timeout.
	 */
	if (!amdgpu_sriov_vf(adev))
		amdgpu_job_core_dump(adev, job);

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
			s_job->sched->name);
		goto exit;
	}

	dev_err(adev->dev, "ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		ring->fence_drv.sync_seq);

	ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
	if (ti) {
		amdgpu_vm_print_task_info(adev, ti);
		info = &ti->task;
	}

	/* Attempt a per-ring reset */
	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
	    ring->funcs->reset) {
		dev_err(adev->dev, "Starting %s ring reset\n",
			s_job->sched->name);
		/* Stop the scheduler to prevent anybody else from touching the ring buffer. */
		drm_sched_wqueue_stop(&ring->sched);
		r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence);
		if (!r) {
			/* Start the scheduler again */
			drm_sched_wqueue_start(&ring->sched);
			atomic_inc(&ring->adev->gpu_reset_counter);
			dev_err(adev->dev, "Ring %s reset succeeded\n",
				ring->sched.name);
			drm_dev_wedged_event(adev_to_drm(adev),
					     DRM_WEDGE_RECOVERY_NONE, info);
			/* This is needed to add the job back to the pending list */
			status = DRM_GPU_SCHED_STAT_NO_HANG;
			goto exit;
		}
		dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
	}

	if (dma_fence_get_status(&s_job->s_fence->finished) == 0)
		dma_fence_set_error(&s_job->s_fence->finished, -ETIME);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		reset_context.src = AMDGPU_RESET_SRC_JOB;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		/*
		 * Skip another coredump here, as we already captured a very
		 * close representation of the GPU's error status above.
		 */
		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
		if (r)
			dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	amdgpu_vm_put_task_info(ti);
	drm_dev_exit(idx);
	return status;
}

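/**
 * amdgpu_job_alloc - allocate and initialize a job
 * @adev: amdgpu device the job will run on
 * @vm: VM the job is associated with, may be NULL
 * @entity: scheduler entity the job will be pushed to, may be NULL
 * @owner: owner handed to the scheduler
 * @num_ibs: number of IBs to reserve space for, must be non-zero
 * @job: resulting job, set to NULL on failure
 * @drm_client_id: DRM client id forwarded to the scheduler
 *
 * Returns: 0 on success, a negative error code on failure.
 */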
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct drm_sched_entity *entity, void *owner,
		     unsigned int num_ibs, struct amdgpu_job **job,
		     u64 drm_client_id)
{
	struct amdgpu_fence *af;
	int r;

	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc_flex(**job, ibs, num_ibs);
	if (!*job)
		return -ENOMEM;

	af = kzalloc_obj(struct amdgpu_fence);
	if (!af) {
		r = -ENOMEM;
		goto err_job;
	}
	(*job)->hw_fence = af;

	af = kzalloc_obj(struct amdgpu_fence);
	if (!af) {
		r = -ENOMEM;
		goto err_fence;
	}
	(*job)->hw_vm_fence = af;

	(*job)->vm = vm;

	amdgpu_sync_create(&(*job)->explicit_sync);
	(*job)->generation = amdgpu_vm_generation(adev, vm);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	if (!entity)
		return 0;

	r = drm_sched_job_init(&(*job)->base, entity, 1, owner, drm_client_id);
	if (!r)
		return 0;

	kfree((*job)->hw_vm_fence);

err_fence:
	kfree((*job)->hw_fence);
err_job:
	kfree(*job);
	*job = NULL;

	return r;
}

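/**
 * amdgpu_job_alloc_with_ib - allocate a job together with a single IB
 * @adev: amdgpu device the job will run on
 * @entity: scheduler entity the job will be pushed to, may be NULL
 * @owner: owner handed to the scheduler
 * @size: size of the IB to allocate
 * @pool_type: IB pool to allocate the IB from
 * @job: resulting job, set to NULL on failure
 * @k_job_id: kernel job id, forwarded in place of a DRM client id
 *
 * Returns: 0 on success, a negative error code on failure.
 */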
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
			     struct drm_sched_entity *entity, void *owner,
			     size_t size, enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job, u64 k_job_id)
{
	int r;

	r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
			     k_job_id);
	if (r)
		return r;

	(*job)->num_ibs = 1;
	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r) {
		if (entity)
			drm_sched_job_cleanup(&(*job)->base);
		kfree((*job)->hw_vm_fence);
		kfree((*job)->hw_fence);
		kfree(*job);
		*job = NULL;
	}

	return r;
}

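/*
 * Record the GPU offsets and sizes (in pages) of the optional GDS, GWS and
 * OA buffer objects in @job.
 */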
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
			      struct amdgpu_bo *gws, struct amdgpu_bo *oa)
{
	if (gds) {
		job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}
}

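/*
 * Free the IBs of @job. The fence handed to amdgpu_ib_free() is the
 * scheduler's finished fence if it was initialized, otherwise the hardware
 * fence, or NULL if neither exists yet.
 */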
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct dma_fence *f;
	unsigned int i;

	/* Check if any fences were initialized */
	if (job->base.s_fence && job->base.s_fence->finished.ops)
		f = &job->base.s_fence->finished;
	else if (job->hw_fence && job->hw_fence->base.ops)
		f = &job->hw_fence->base;
	else
		f = NULL;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(&job->ibs[i], f);
}

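/*
 * Scheduler free_job callback: clean up the scheduler job, release the
 * explicit sync object, drop (or free, if never initialized) both hardware
 * fences and finally free the job itself.
 */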
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->explicit_sync);

	if (job->hw_fence->base.ops)
		dma_fence_put(&job->hw_fence->base);
	else
		kfree(job->hw_fence);
	if (job->hw_vm_fence->base.ops)
		dma_fence_put(&job->hw_vm_fence->base);
	else
		kfree(job->hw_vm_fence);

	kfree(job);
}

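/**
 * amdgpu_job_set_gang_leader - set the gang leader for a job
 * @job: job to set the gang leader for
 * @leader: job to use as gang leader
 *
 * Remember the scheduled fence of the leader as the gang submit dependency.
 * No extra reference is taken when @job is the leader itself, to avoid a
 * circular dependency.
 */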
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
				struct amdgpu_job *leader)
{
	struct dma_fence *fence = &leader->base.s_fence->scheduled;

	WARN_ON(job->gang_submit);

	/*
	 * Don't add a reference when we are the gang leader to avoid a
	 * circular dependency.
	 */
	if (job != leader)
		dma_fence_get(fence);
	job->gang_submit = fence;
}

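/*
 * Free @job and everything it still references: the scheduler job state, its
 * IBs, the explicit sync object, the gang submit fence and the hardware
 * fences.
 */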
void amdgpu_job_free(struct amdgpu_job *job)
{
	if (job->base.entity)
		drm_sched_job_cleanup(&job->base);

	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->explicit_sync);
	if (job->gang_submit != &job->base.s_fence->scheduled)
		dma_fence_put(job->gang_submit);

	if (job->hw_fence->base.ops)
		dma_fence_put(&job->hw_fence->base);
	else
		kfree(job->hw_fence);
	if (job->hw_vm_fence->base.ops)
		dma_fence_put(&job->hw_vm_fence->base);
	else
		kfree(job->hw_vm_fence);

	kfree(job);
}

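/**
 * amdgpu_job_submit - push a job to its scheduler entity
 * @job: job to submit
 *
 * Arm the job, free its IB resources and push it to the entity it was
 * allocated for.
 *
 * Returns: a reference to the finished fence of the job.
 */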
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
{
	struct dma_fence *f;

	drm_sched_job_arm(&job->base);
	f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return f;
}

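/**
 * amdgpu_job_submit_direct - submit a job directly to a ring
 * @job: job to submit
 * @ring: ring to schedule the IBs on
 * @fence: resulting hardware fence
 *
 * Bypass the scheduler and schedule the IBs immediately. The job is freed
 * on success.
 *
 * Returns: 0 on success, a negative error code on failure.
 */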
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);

	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

static struct dma_fence *
amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
		       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct dma_fence *fence;
	int r;

	r = drm_sched_entity_error(s_entity);
	if (r)
		goto error;

	if (job->gang_submit) {
		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
		if (fence)
			return fence;
	}

	fence = amdgpu_device_enforce_isolation(ring->adev, ring, job);
	if (fence)
		return fence;

	if (job->vm && !job->vmid) {
		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
		if (r) {
			dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
			goto error;
		}
		return fence;
	}

	return NULL;

error:
	dma_fence_set_error(&job->base.s_fence->finished, r);
	return NULL;
}

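/*
 * Scheduler run_job callback: schedule the IBs of the job on its ring, unless
 * the VM generation changed (e.g. VRAM was lost) or a gang submit is being
 * resubmitted, in which case the job is cancelled instead.
 */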
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	trace_amdgpu_sched_run_job(job);

	/* Skip job if VRAM is lost and never resubmit gangs */
	if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
	    (job->job_run_counter && job->gang_submit))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
			ring->name);
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			dev_err(adev->dev,
				"Error scheduling IBs (%d) in ring(%s)", r,
				ring->name);
	}

	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

/*
 * This is a duplicate of a function from the DRM scheduler's
 * sched_internal.h. The plan is to remove it when
 * amdgpu_job_stop_all_jobs_on_sched() is removed, the latter being incorrect
 * and racy.
 *
 * See https://lore.kernel.org/amd-gfx/44edde63-7181-44fb-a4f7-94e50514f539@amd.com/
 */
static struct drm_sched_job *
drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
{
	struct spsc_node *node;

	node = spsc_queue_pop(&entity->job_queue);
	if (!node)
		return NULL;

	return container_of(node, struct drm_sched_job, queue_node);
}

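/*
 * Signal and fail (-EHWPOISON) every job on @sched: first the jobs still
 * queued on the entities of each run queue, then the jobs already pushed to
 * the hardware and sitting on the pending list.
 */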
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		struct drm_sched_rq *rq = sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = drm_sched_entity_queue_pop(s_entity))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_job_prepare_job,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};