xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c (revision 570172569238c66a482ec3eb5d766cc9cf255f69)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
#include "amdgpu_dev_coredump.h"
#include "amdgpu_xgmi.h"

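/*
 * amdgpu_job_do_core_dump - dump IP state and create a coredump for one device
 *
 * Asks every IP block that implements dump_ip_state() to capture its state,
 * then generates a devcoredump for @adev with the timed-out @job attached.
 */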
static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
				    struct amdgpu_job *job)
{
	int i;

	dev_info(adev->dev, "Dumping IP State\n");
	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->funcs->dump_ip_state)
			adev->ip_blocks[i].version->funcs
				->dump_ip_state((void *)adev);
	dev_info(adev->dev, "Dumping IP State Completed\n");

	amdgpu_coredump(adev, true, false, job);
}

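/*
 * amdgpu_job_core_dump - core dump every device affected by a job timeout
 *
 * Builds the device list the same way amdgpu_device_gpu_recover() does (all
 * devices of the XGMI hive with @adev first, or just @adev on its own) and
 * runs amdgpu_job_do_core_dump() on each of them.
 */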
static void amdgpu_job_core_dump(struct amdgpu_device *adev,
				 struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	struct amdgpu_hive_info *hive = NULL;

	if (!amdgpu_sriov_vf(adev))
		hive = amdgpu_get_xgmi_hive(adev);
	if (hive)
		mutex_lock(&hive->hive_lock);
	/*
	 * Reuse the logic in amdgpu_device_gpu_recover() to build the list of
	 * devices for the core dump
	 */
	INIT_LIST_HEAD(&device_list);
	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* Do the coredump for each device */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list)
		amdgpu_job_do_core_dump(tmp_adev, job);

	if (hive) {
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}
}

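/*
 * amdgpu_job_timedout - scheduler timeout handler
 *
 * Called by the DRM scheduler when @s_job did not complete in time.  Takes a
 * coredump, then tries soft recovery, a per-ring reset and finally a full GPU
 * recovery before reporting the job status back to the scheduler.
 */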
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info *ti;
	struct amdgpu_device *adev = ring->adev;
	int idx;
	int r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		dev_info(adev->dev, "%s - device unplugged, skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	adev->job_hang = true;

	/*
	 * Do the coredump immediately after a job timeout to get a snapshot
	 * of the GPU's error state as close to the failure as possible.
	 */
	amdgpu_job_core_dump(adev, job);

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
			s_job->sched->name);
		goto exit;
	}

	dev_err(adev->dev, "ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		ring->fence_drv.sync_seq);

	ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
	if (ti) {
		dev_err(adev->dev,
			"Process information: process %s pid %d thread %s pid %d\n",
			ti->process_name, ti->tgid, ti->task_name, ti->pid);
		amdgpu_vm_put_task_info(ti);
	}

	dma_fence_set_error(&s_job->s_fence->finished, -ETIME);

	/* attempt a per-ring reset */
	if (amdgpu_gpu_recovery &&
	    ring->funcs->reset) {
		/* Stop the scheduler, but don't touch the bad job yet;
		 * if the ring reset fails we fall back to a full GPU reset.
		 */
		drm_sched_wqueue_stop(&ring->sched);
		r = amdgpu_ring_reset(ring, job->vmid);
		if (!r) {
			if (amdgpu_ring_sched_ready(ring))
				drm_sched_stop(&ring->sched, s_job);
			atomic_inc(&ring->adev->gpu_reset_counter);
			amdgpu_fence_driver_force_completion(ring);
			if (amdgpu_ring_sched_ready(ring))
				drm_sched_start(&ring->sched);
			goto exit;
		}
	}

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		reset_context.src = AMDGPU_RESET_SRC_JOB;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		/*
		 * Skip an extra coredump here; we already captured the GPU's
		 * error state right after the timeout above.
		 */
		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
		if (r)
			dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	adev->job_hang = false;
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

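/**
 * amdgpu_job_alloc - allocate a job with space for @num_ibs IBs
 * @adev: amdgpu device the job will run on
 * @vm: optional VM the job is associated with, may be NULL
 * @entity: optional scheduler entity to initialize the job for, may be NULL
 * @owner: job owner handed to drm_sched_job_init()
 * @num_ibs: number of IBs to reserve, must not be zero
 * @job: pointer receiving the allocated job
 *
 * Returns: 0 on success, -EINVAL for zero @num_ibs, -ENOMEM on allocation
 * failure or the error returned by drm_sched_job_init().
 */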
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct drm_sched_entity *entity, void *owner,
		     unsigned int num_ibs, struct amdgpu_job **job)
{
	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;

	amdgpu_sync_create(&(*job)->explicit_sync);
	(*job)->generation = amdgpu_vm_generation(adev, vm);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	if (!entity)
		return 0;

	return drm_sched_job_init(&(*job)->base, entity, 1, owner);
}

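/**
 * amdgpu_job_alloc_with_ib - allocate a job together with a single IB
 * @adev: amdgpu device
 * @entity: optional scheduler entity, may be NULL
 * @owner: job owner
 * @size: size of the IB in bytes
 * @pool_type: IB pool to allocate the IB from
 * @job: pointer receiving the allocated job
 *
 * Returns: 0 on success, negative error code otherwise.
 */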
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
			     struct drm_sched_entity *entity, void *owner,
			     size_t size, enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
	if (r)
		return r;

	(*job)->num_ibs = 1;
	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r) {
		if (entity)
			drm_sched_job_cleanup(&(*job)->base);
		kfree(*job);
	}

	return r;
}

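/**
 * amdgpu_job_set_resources - remember GDS, GWS and OA allocations for a job
 * @job: job to update
 * @gds: optional GDS buffer object
 * @gws: optional GWS buffer object
 * @oa: optional OA buffer object
 *
 * Stores the base offset and size (in pages) of each provided buffer in the
 * job.
 */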
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
			      struct amdgpu_bo *gws, struct amdgpu_bo *oa)
{
	if (gds) {
		job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}
}

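/**
 * amdgpu_job_free_resources - release the IBs of a job
 * @job: job to clean up
 *
 * Frees all IBs of the job; the IB memory stays reserved until the job's
 * finished or hardware fence (whichever is initialized) signals.
 */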
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	unsigned i;

	/* Check if any fences were initialized */
	if (job->base.s_fence && job->base.s_fence->finished.ops)
		f = &job->base.s_fence->finished;
	else if (job->hw_fence.ops)
		f = &job->hw_fence;
	else
		f = NULL;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

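/*
 * amdgpu_job_free_cb - scheduler .free_job callback
 *
 * Cleans up the drm_sched job, frees the explicit sync object and releases
 * the job either by kfree() or by dropping the embedded hardware fence.
 */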
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->explicit_sync);

	/* Only put the hw fence if the job has an embedded fence */
	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}

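/**
 * amdgpu_job_set_gang_leader - mark a job as part of a gang submission
 * @job: job to attach the gang dependency to
 * @leader: gang leader whose scheduled fence the job must wait for
 *
 * The leader's scheduled fence is stored as @job->gang_submit; the leader
 * itself does not take an extra reference on its own fence.
 */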
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
				struct amdgpu_job *leader)
{
	struct dma_fence *fence = &leader->base.s_fence->scheduled;

	WARN_ON(job->gang_submit);

	/*
	 * Don't add a reference when we are the gang leader to avoid a
	 * circular dependency.
	 */
	if (job != leader)
		dma_fence_get(fence);
	job->gang_submit = fence;
}

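/**
 * amdgpu_job_free - clean up and release a job
 * @job: job to free
 *
 * Cleans up the scheduler job (when an entity was set), the IBs, the explicit
 * sync object and the gang dependency, then frees the job or drops its
 * embedded hardware fence.
 */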
void amdgpu_job_free(struct amdgpu_job *job)
{
	if (job->base.entity)
		drm_sched_job_cleanup(&job->base);

	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->explicit_sync);
	if (job->gang_submit != &job->base.s_fence->scheduled)
		dma_fence_put(job->gang_submit);

	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}

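/**
 * amdgpu_job_submit - push a job to its scheduler entity
 * @job: job to submit
 *
 * Arms the scheduler job and pushes it to the entity it was initialized with.
 *
 * Returns: a reference to the finished fence of the job.
 */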
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
{
	struct dma_fence *f;

	drm_sched_job_arm(&job->base);
	f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return f;
}

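/**
 * amdgpu_job_submit_direct - submit a job directly to a ring
 * @job: job to submit
 * @ring: ring to schedule the job's IBs on
 * @fence: resulting hardware fence
 *
 * Bypasses the scheduler and schedules the IBs on @ring right away; the job
 * is freed on success.
 *
 * Returns: 0 on success, negative error code otherwise.
 */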
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);

	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

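/*
 * amdgpu_job_prepare_job - scheduler .prepare_job callback
 *
 * Returns the next fence the job has to wait for before it can run: the gang
 * switch fence or the fence of the VMID grab, or NULL when nothing is left.
 */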
static struct dma_fence *
amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
		       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct dma_fence *fence = NULL;
	int r;

	r = drm_sched_entity_error(s_entity);
	if (r)
		goto error;

	if (!fence && job->gang_submit)
		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);

	while (!fence && job->vm && !job->vmid) {
		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
		if (r) {
			dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
			goto error;
		}
	}

	return fence;

error:
	dma_fence_set_error(&job->base.s_fence->finished, r);
	return NULL;
}

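/*
 * amdgpu_job_run - scheduler .run_job callback
 *
 * Pushes the job's IBs to the hardware ring unless the job has to be skipped
 * (VRAM lost or resubmitted gang) and returns the hardware fence.
 */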
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	trace_amdgpu_sched_run_job(job);

	/* Skip job if VRAM is lost and never resubmit gangs */
	if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
	    (job->job_run_counter && job->gang_submit))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
			ring->name);
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			dev_err(adev->dev,
				"Error scheduling IBs (%d) in ring(%s)", r,
				ring->name);
	}

	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

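/**
 * amdgpu_job_stop_all_jobs_on_sched - fail every job on a scheduler
 * @sched: scheduler to drain
 *
 * Signals the fences of all jobs still sitting in the entity queues as well
 * as those already pushed to the hardware, marking them with -EHWPOISON.
 */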
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		struct drm_sched_rq *rq = sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_job_prepare_job,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};