xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c (revision 2845f512232de9e436b9e3b5529e906e62414013)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

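/*
 * amdgpu_job_timedout - drm_sched timedout_job callback
 *
 * Called by the scheduler when a job exceeds its timeout. Tries the
 * cheapest recovery first: a soft recovery on the ring, then a per ring
 * reset, and finally a full GPU reset if the device is still willing to
 * recover. If the device has already been unplugged the job is simply
 * aborted with DRM_GPU_SCHED_STAT_ENODEV.
 */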
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info *ti;
	struct amdgpu_device *adev = ring->adev;
	int idx;
	int r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	adev->job_hang = true;

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
			s_job->sched->name);
		goto exit;
	}

	dev_err(adev->dev, "ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		ring->fence_drv.sync_seq);

	ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
	if (ti) {
		dev_err(adev->dev,
			"Process information: process %s pid %d thread %s pid %d\n",
			ti->process_name, ti->tgid, ti->task_name, ti->pid);
		amdgpu_vm_put_task_info(ti);
	}

	dma_fence_set_error(&s_job->s_fence->finished, -ETIME);

	/* attempt a per ring reset */
	if (amdgpu_gpu_recovery &&
	    ring->funcs->reset) {
		/* stop the scheduler, but don't mess with the
		 * bad job yet because if ring reset fails
		 * we'll fall back to full GPU reset.
		 */
		drm_sched_wqueue_stop(&ring->sched);
		r = amdgpu_ring_reset(ring, job->vmid);
		if (!r) {
			if (amdgpu_ring_sched_ready(ring))
				drm_sched_stop(&ring->sched, s_job);
			atomic_inc(&ring->adev->gpu_reset_counter);
			amdgpu_fence_driver_force_completion(ring);
			if (amdgpu_ring_sched_ready(ring))
				drm_sched_start(&ring->sched, true);
			goto exit;
		}
	}

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		struct amdgpu_reset_context reset_context;
		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		reset_context.src = AMDGPU_RESET_SRC_JOB;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
		if (r)
			dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	adev->job_hang = false;
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

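/**
 * amdgpu_job_alloc - allocate an amdgpu_job with room for IBs
 * @adev: amdgpu device the job will be executed on
 * @vm: optional VM the job belongs to
 * @entity: optional scheduler entity the job will be pushed to
 * @owner: job owner, handed to drm_sched_job_init() when @entity is given
 * @num_ibs: number of IBs to reserve space for, must not be zero
 * @job: pointer to the resulting job, only valid on success
 *
 * The scheduler pointer is initialized to the first ring so that the job
 * always provides a way back to the device.
 *
 * Returns: 0 on success, negative error code otherwise.
 */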
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct drm_sched_entity *entity, void *owner,
		     unsigned int num_ibs, struct amdgpu_job **job)
{
	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;

	amdgpu_sync_create(&(*job)->explicit_sync);
	(*job)->generation = amdgpu_vm_generation(adev, vm);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	if (!entity)
		return 0;

	return drm_sched_job_init(&(*job)->base, entity, 1, owner);
}

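/**
 * amdgpu_job_alloc_with_ib - allocate a VM-less job together with a single IB
 * @adev: amdgpu device the job will be executed on
 * @entity: optional scheduler entity the job will be pushed to
 * @owner: job owner, handed to drm_sched_job_init() when @entity is given
 * @size: size of the IB in bytes
 * @pool_type: IB pool to allocate the IB from
 * @job: pointer to the resulting job, only valid on success
 *
 * Convenience wrapper around amdgpu_job_alloc() which also grabs one IB of
 * @size bytes from @pool_type.
 *
 * Returns: 0 on success, negative error code otherwise.
 */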
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
			     struct drm_sched_entity *entity, void *owner,
			     size_t size, enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
	if (r)
		return r;

	(*job)->num_ibs = 1;
	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r) {
		if (entity)
			drm_sched_job_cleanup(&(*job)->base);
		kfree(*job);
	}

	return r;
}

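/**
 * amdgpu_job_set_resources - record GDS, GWS and OA usage for a job
 * @job: job to update
 * @gds: optional GDS buffer object
 * @gws: optional GWS buffer object
 * @oa: optional OA buffer object
 *
 * Stores the GPU offsets and sizes of the given buffers in the job, in
 * units of pages.
 */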
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
			      struct amdgpu_bo *gws, struct amdgpu_bo *oa)
{
	if (gds) {
		job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}
}

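/**
 * amdgpu_job_free_resources - free all the IBs attached to a job
 * @job: job to clean up
 *
 * The finished fence or the embedded hw fence, whichever was initialized,
 * is handed to amdgpu_ib_free() so the IB memory is only reused once the
 * hardware is done with it.
 */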
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	unsigned i;

	/* Check if any fences were initialized */
	if (job->base.s_fence && job->base.s_fence->finished.ops)
		f = &job->base.s_fence->finished;
	else if (job->hw_fence.ops)
		f = &job->hw_fence;
	else
		f = NULL;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

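/*
 * amdgpu_job_free_cb - drm_sched free_job callback
 *
 * Cleans up the scheduler job and the explicit sync object. The job itself
 * is either kfree()d directly or, when a hw fence was embedded, the
 * reference on that fence is dropped instead.
 */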
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->explicit_sync);

	/* Only put the hw fence if the job has an embedded fence */
	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}

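/**
 * amdgpu_job_set_gang_leader - point a job at its gang leader
 * @job: job to modify
 * @leader: gang leader, can be the job itself
 *
 * Stores the scheduled fence of the gang leader in the job. Only members
 * other than the leader take a reference on the fence.
 */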
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
				struct amdgpu_job *leader)
{
	struct dma_fence *fence = &leader->base.s_fence->scheduled;

	WARN_ON(job->gang_submit);

	/*
	 * Don't add a reference when we are the gang leader to avoid a
	 * circular dependency.
	 */
	if (job != leader)
		dma_fence_get(fence);
	job->gang_submit = fence;
}

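/**
 * amdgpu_job_free - clean up and release a job
 * @job: job to free
 *
 * Cleans up the scheduler job state, the IBs, the explicit sync object and
 * the gang submit fence, then frees the job or drops the reference on its
 * embedded hw fence.
 */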
void amdgpu_job_free(struct amdgpu_job *job)
{
	if (job->base.entity)
		drm_sched_job_cleanup(&job->base);

	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->explicit_sync);
	if (job->gang_submit != &job->base.s_fence->scheduled)
		dma_fence_put(job->gang_submit);

	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}

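/**
 * amdgpu_job_submit - arm a job and push it to its scheduler entity
 * @job: job to submit
 *
 * Returns: a reference to the finished fence of the job, which the caller
 * must eventually put.
 */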
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
{
	struct dma_fence *f;

	drm_sched_job_arm(&job->base);
	f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return f;
}

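/**
 * amdgpu_job_submit_direct - schedule a job on a ring, bypassing the scheduler
 * @job: job to submit
 * @ring: ring the IBs are executed on
 * @fence: resulting hw fence, only valid on success
 *
 * The job is freed on success.
 *
 * Returns: 0 on success, negative error code otherwise.
 */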
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);

	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

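/*
 * amdgpu_job_prepare_job - drm_sched prepare_job callback
 *
 * Returns a fence the scheduler has to wait for before running the job:
 * the gang switch fence when the job is part of a gang, or the fence of
 * the VMID grab while the job still lacks a VM ID. On error the finished
 * fence is marked with the error code and NULL is returned.
 */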
static struct dma_fence *
amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
		      struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct dma_fence *fence = NULL;
	int r;

	r = drm_sched_entity_error(s_entity);
	if (r)
		goto error;

	if (!fence && job->gang_submit)
		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);

	while (!fence && job->vm && !job->vmid) {
		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
		if (r) {
			dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
			goto error;
		}
	}

	return fence;

error:
	dma_fence_set_error(&job->base.s_fence->finished, r);
	return NULL;
}

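/*
 * amdgpu_job_run - drm_sched run_job callback
 *
 * Schedules the job's IBs on the ring. Jobs whose VM generation no longer
 * matches and resubmitted gang members are cancelled with -ECANCELED
 * instead of being run.
 *
 * Returns the hw fence of the submission, NULL when nothing was scheduled,
 * or an ERR_PTR on failure.
 */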
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	trace_amdgpu_sched_run_job(job);

	/* Skip job if VRAM is lost and never resubmit gangs */
	if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
	    (job->job_run_counter && job->gang_submit))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
			ring->name);
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			dev_err(adev->dev,
				"Error scheduling IBs (%d) in ring(%s)", r,
				ring->name);
	}

	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

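/**
 * amdgpu_job_stop_all_jobs_on_sched - poison and signal all jobs of a scheduler
 * @sched: scheduler to drain
 *
 * Pops every job still sitting in the scheduler's run queues and signals
 * its fences with -EHWPOISON, then does the same for the jobs already
 * pushed to the hardware so that nobody keeps waiting on them.
 */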
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		struct drm_sched_rq *rq = sched->sched_rq[i];
		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_job_prepare_job,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};