xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c (revision 5ea5b6ff0d63aef1dc3fb25445acea183f61a934)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
#include "amdgpu_dev_coredump.h"
#include "amdgpu_xgmi.h"

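/*
 * Dump the hardware state of every IP block that implements
 * dump_ip_state() and then generate a devcoredump for the timed out job.
 */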
static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
				    struct amdgpu_job *job)
{
	int i;

	dev_info(adev->dev, "Dumping IP State\n");
	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->funcs->dump_ip_state)
			adev->ip_blocks[i].version->funcs
				->dump_ip_state((void *)&adev->ip_blocks[i]);
	dev_info(adev->dev, "Dumping IP State Completed\n");

	amdgpu_coredump(adev, true, false, job);
}

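/*
 * Write a coredump for every device affected by the timeout. For XGMI
 * hives this covers all devices in the hive, with the device that hit
 * the timeout dumped first; otherwise only @adev is dumped.
 */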
static void amdgpu_job_core_dump(struct amdgpu_device *adev,
				 struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	struct amdgpu_hive_info *hive = NULL;

	if (!amdgpu_sriov_vf(adev))
		hive = amdgpu_get_xgmi_hive(adev);
	if (hive)
		mutex_lock(&hive->hive_lock);
	/*
	 * Reuse the logic in amdgpu_device_gpu_recover() to build the list of
	 * devices for the coredump.
	 */
	INIT_LIST_HEAD(&device_list);
	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* Do the coredump for each device */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list)
		amdgpu_job_do_core_dump(tmp_adev, job);

	if (hive) {
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}
}

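/*
 * Scheduler timeout callback. Tries the cheapest recovery the ring supports
 * first (soft recovery, then a per-queue reset) and falls back to a full GPU
 * reset only when those fail or are unsupported.
 */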
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct drm_wedge_task_info *info = NULL;
	struct amdgpu_task_info *ti = NULL;
	struct amdgpu_device *adev = ring->adev;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET;
	int idx, r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		dev_info(adev->dev, "%s - device unplugged, skipping recovery on scheduler:%s\n",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	/*
	 * Do the coredump immediately after a job timeout to get a very
	 * close snapshot of the GPU's current error state. Skip it for
	 * SRIOV, since the host driver triggers a VF FLR before the job
	 * timeout fires.
	 */
	if (!amdgpu_sriov_vf(adev))
		amdgpu_job_core_dump(adev, job);

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
			s_job->sched->name);
		goto exit;
	}

	dev_err(adev->dev, "ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		ring->fence_drv.sync_seq);

	ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
	if (ti) {
		amdgpu_vm_print_task_info(adev, ti);
		info = &ti->task;
	}

	/* Attempt a per-ring reset */
	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
	    ring->funcs->reset) {
		dev_err(adev->dev, "Starting %s ring reset\n",
			s_job->sched->name);
		/* Stop the scheduler to prevent anybody else from touching the ring buffer. */
		drm_sched_wqueue_stop(&ring->sched);
		r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence);
		if (!r) {
			/* Start the scheduler again */
			drm_sched_wqueue_start(&ring->sched);
			atomic_inc(&ring->adev->gpu_reset_counter);
			dev_err(adev->dev, "Ring %s reset succeeded\n",
				ring->sched.name);
			drm_dev_wedged_event(adev_to_drm(adev),
					     DRM_WEDGE_RECOVERY_NONE, info);
			/* This is needed to add the job back to the pending list */
			status = DRM_GPU_SCHED_STAT_NO_HANG;
			goto exit;
		}
		dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
	}

	if (dma_fence_get_status(&s_job->s_fence->finished) == 0)
		dma_fence_set_error(&s_job->s_fence->finished, -ETIME);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		reset_context.src = AMDGPU_RESET_SRC_JOB;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		/*
		 * Skip the extra coredump here; a very close representation
		 * of the GPU's error state was already captured above.
		 */
		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
		if (r)
			dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	amdgpu_vm_put_task_info(ti);
	drm_dev_exit(idx);
	return status;
}

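/**
 * amdgpu_job_alloc - allocate a job with a flexible number of IBs
 * @adev: amdgpu device the job runs on
 * @vm: optional VM the job is associated with
 * @entity: optional scheduler entity the job will be pushed to
 * @owner: job owner, passed to the scheduler
 * @num_ibs: number of IBs to reserve, must be non-zero
 * @job: resulting job, set to NULL on failure
 * @drm_client_id: id of the DRM client submitting the job
 *
 * Allocates the job together with both hardware fences and, when @entity is
 * given, initializes the scheduler job as well.
 *
 * Returns: 0 on success or a negative error code on failure.
 */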
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct drm_sched_entity *entity, void *owner,
		     unsigned int num_ibs, struct amdgpu_job **job,
		     u64 drm_client_id)
{
	struct amdgpu_fence *af;
	int r;

	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc_flex(**job, ibs, num_ibs);
	if (!*job)
		return -ENOMEM;

	af = kzalloc_obj(struct amdgpu_fence);
	if (!af) {
		r = -ENOMEM;
		goto err_job;
	}
	(*job)->hw_fence = af;

	af = kzalloc_obj(struct amdgpu_fence);
	if (!af) {
		r = -ENOMEM;
		goto err_fence;
	}
	(*job)->hw_vm_fence = af;

	(*job)->vm = vm;

	amdgpu_sync_create(&(*job)->explicit_sync);
	(*job)->generation = amdgpu_vm_generation(adev, vm);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	if (!entity)
		return 0;

	r = drm_sched_job_init(&(*job)->base, entity, 1, owner, drm_client_id);
	if (!r)
		return 0;

	kfree((*job)->hw_vm_fence);

err_fence:
	kfree((*job)->hw_fence);
err_job:
	kfree(*job);
	*job = NULL;

	return r;
}

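/**
 * amdgpu_job_alloc_with_ib - allocate a job with a single IB
 * @adev: amdgpu device the job runs on
 * @entity: optional scheduler entity the job will be pushed to
 * @owner: job owner, passed to the scheduler
 * @size: size of the IB in bytes
 * @pool_type: IB pool to allocate from
 * @job: resulting job, set to NULL on failure
 * @k_job_id: kernel job id, forwarded as the DRM client id
 *
 * Convenience wrapper around amdgpu_job_alloc() for kernel-internal jobs
 * that only need one IB.
 *
 * Returns: 0 on success or a negative error code on failure.
 */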
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
			     struct drm_sched_entity *entity, void *owner,
			     size_t size, enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job, u64 k_job_id)
{
	int r;

	r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
			     k_job_id);
	if (r)
		return r;

	(*job)->num_ibs = 1;
	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r) {
		if (entity)
			drm_sched_job_cleanup(&(*job)->base);
		kfree((*job)->hw_vm_fence);
		kfree((*job)->hw_fence);
		kfree(*job);
		*job = NULL;
	}

	return r;
}

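/**
 * amdgpu_job_set_resources - record GDS, GWS and OA allocations in a job
 * @job: job to update
 * @gds: optional GDS buffer object
 * @gws: optional GWS buffer object
 * @oa: optional OA buffer object
 *
 * Stores base offset and size of each given resource in the job, in pages.
 */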
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
			      struct amdgpu_bo *gws, struct amdgpu_bo *oa)
{
	if (gds) {
		job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}
}

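/**
 * amdgpu_job_free_resources - free the IBs of a job
 * @job: job whose IBs should be freed
 *
 * Frees all IBs of the job, passing along the most advanced fence that was
 * already initialized for the job (scheduler finished fence, else hardware
 * fence) so the IB memory stays reserved until that fence signals.
 */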
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct dma_fence *f;
	unsigned int i;

	/* Check if any fences were initialized */
	if (job->base.s_fence &&
	    dma_fence_was_initialized(&job->base.s_fence->finished))
		f = &job->base.s_fence->finished;
	else if (dma_fence_was_initialized(&job->hw_fence->base))
		f = &job->hw_fence->base;
	else
		f = NULL;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(&job->ibs[i], f);
}

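/*
 * Scheduler free_job callback, called once the scheduler is done with the
 * job. Releases the scheduler state, the sync object and both hardware
 * fences before freeing the job itself.
 */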
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->explicit_sync);

	if (dma_fence_was_initialized(&job->hw_fence->base))
		dma_fence_put(&job->hw_fence->base);
	else
		kfree(job->hw_fence);
	if (dma_fence_was_initialized(&job->hw_vm_fence->base))
		dma_fence_put(&job->hw_vm_fence->base);
	else
		kfree(job->hw_vm_fence);

	kfree(job);
}

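/**
 * amdgpu_job_set_gang_leader - set the gang leader for a job
 * @job: job to set the gang leader for
 * @leader: gang leader job
 *
 * Makes @job depend on the scheduled fence of @leader.
 */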
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
				struct amdgpu_job *leader)
{
	struct dma_fence *fence = &leader->base.s_fence->scheduled;

	WARN_ON(job->gang_submit);

	/*
	 * Don't add a reference when we are the gang leader to avoid a
	 * circular dependency.
	 */
	if (job != leader)
		dma_fence_get(fence);
	job->gang_submit = fence;
}

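/**
 * amdgpu_job_free - free a job without pushing it to the scheduler
 * @job: job to free
 *
 * Counterpart to amdgpu_job_alloc() for the error and direct-submit paths;
 * releases all resources the job still holds.
 */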
void amdgpu_job_free(struct amdgpu_job *job)
{
	if (job->base.entity)
		drm_sched_job_cleanup(&job->base);

	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->explicit_sync);
	if (job->gang_submit != &job->base.s_fence->scheduled)
		dma_fence_put(job->gang_submit);

	if (dma_fence_was_initialized(&job->hw_fence->base))
		dma_fence_put(&job->hw_fence->base);
	else
		kfree(job->hw_fence);
	if (dma_fence_was_initialized(&job->hw_vm_fence->base))
		dma_fence_put(&job->hw_vm_fence->base);
	else
		kfree(job->hw_vm_fence);

	kfree(job);
}

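/**
 * amdgpu_job_submit - push the job to its scheduler entity
 * @job: job to submit
 *
 * Arms and pushes the job; ownership passes to the scheduler, which frees it
 * through amdgpu_job_free_cb() once it is done with it.
 *
 * Returns: a reference to the finished fence of the job, which the caller
 * must drop with dma_fence_put().
 *
 * A typical kernel-internal submission looks roughly like the following
 * sketch (the entity, IB contents, owner and job id are caller specific):
 *
 *	r = amdgpu_job_alloc_with_ib(adev, &entity, AMDGPU_FENCE_OWNER_UNDEFINED,
 *				     64, AMDGPU_IB_POOL_DELAYED, &job, 0);
 *	if (r)
 *		return r;
 *	... fill job->ibs[0] with packets ...
 *	fence = amdgpu_job_submit(job);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */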
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
{
	struct dma_fence *f;

	drm_sched_job_arm(&job->base);
	f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return f;
}

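/**
 * amdgpu_job_submit_direct - submit a job directly to a ring
 * @job: job to submit
 * @ring: ring to run the IBs on
 * @fence: resulting hardware fence
 *
 * Bypasses the scheduler and writes the IBs to @ring immediately. On success
 * the job is freed and the caller only keeps the fence.
 *
 * Returns: 0 on success or a negative error code on failure.
 */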
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);
	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

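/*
 * Scheduler prepare_job callback. Returns the next fence the job has to wait
 * for before it can run: the gang switch fence, the isolation fence or the
 * VMID grab fence, in that order.
 */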
static struct dma_fence *
amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
		       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct dma_fence *fence;
	int r;

	r = drm_sched_entity_error(s_entity);
	if (r)
		goto error;

	if (job->gang_submit) {
		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
		if (fence)
			return fence;
	}

	fence = amdgpu_device_enforce_isolation(ring->adev, ring, job);
	if (fence)
		return fence;

	if (job->vm && !job->vmid) {
		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
		if (r) {
			dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
			goto error;
		}
		return fence;
	}

	return NULL;

error:
	dma_fence_set_error(&job->base.s_fence->finished, r);
	return NULL;
}

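/*
 * Scheduler run_job callback. Writes the IBs of the job to the ring, unless
 * the job has to be skipped because VRAM was lost or a gang is being
 * resubmitted.
 */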
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	trace_amdgpu_sched_run_job(job);

	/* Skip the job if VRAM is lost and never resubmit gangs */
	if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
	    (job->job_run_counter && job->gang_submit))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)\n",
			ring->name);
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			dev_err(adev->dev,
				"Error scheduling IBs (%d) in ring(%s)\n", r,
				ring->name);
	}

	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	return r ? ERR_PTR(r) : fence;
}

/*
 * This is a duplicate of drm_sched_entity_queue_pop() from the DRM
 * scheduler's sched_internal.h. The plan is to remove it together with
 * amdgpu_job_stop_all_jobs_on_sched(), since the latter is incorrect and
 * racy.
 *
 * See https://lore.kernel.org/amd-gfx/44edde63-7181-44fb-a4f7-94e50514f539@amd.com/
 */
static struct drm_sched_job *
drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
{
	struct spsc_node *node;

	node = spsc_queue_pop(&entity->job_queue);
	if (!node)
		return NULL;

	return container_of(node, struct drm_sched_job, queue_node);
}

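/*
 * Cancel all jobs on @sched: jobs that were never handed to the hardware get
 * their scheduled fence signaled as well, and every finished fence has its
 * error set to -EHWPOISON before being signaled.
 */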
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		struct drm_sched_rq *rq = sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = drm_sched_entity_queue_pop(s_entity))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_job_prepare_job,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};