Lines matching full:job in drivers/gpu/drm/amd/amdgpu/amdgpu_job.c (identifier cross-reference)

37 struct amdgpu_job *job) in amdgpu_job_do_core_dump() argument
48 amdgpu_coredump(adev, true, false, job); in amdgpu_job_do_core_dump()
52 struct amdgpu_job *job) in amdgpu_job_core_dump() argument
80 amdgpu_job_do_core_dump(tmp_adev, job); in amdgpu_job_core_dump()
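The two core-dump helpers above capture a devcoredump after a hang: amdgpu_job_do_core_dump() dumps one device, and amdgpu_job_core_dump() fans out over every device in an XGMI hive, as the tmp_adev iteration at line 80 suggests. A reconstructed sketch of that fan-out, assuming the usual hive helpers (amdgpu_get_xgmi_hive() and the gmc.xgmi.head list linkage):

/* Reconstructed shape of amdgpu_job_core_dump(); condensed, not verbatim. */
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
struct amdgpu_device *tmp_adev;

if (hive) {
	/* Dump every device in the hive so the captured states are coherent. */
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
		amdgpu_job_do_core_dump(tmp_adev, job);
	amdgpu_put_xgmi_hive(hive);
} else {
	amdgpu_job_do_core_dump(adev, job);
}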
91 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_timedout() local
101 /* Effectively the job is aborted as the device is gone */ in amdgpu_job_timedout()
106 * Do the coredump immediately after a job timeout to get a very in amdgpu_job_timedout()
109 * before job timeout in amdgpu_job_timedout()
112 amdgpu_job_core_dump(adev, job); in amdgpu_job_timedout()
116 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { in amdgpu_job_timedout()
123 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
126 ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid); in amdgpu_job_timedout()
138 r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence); in amdgpu_job_timedout()
167 r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context); in amdgpu_job_timedout()
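Read top to bottom, the amdgpu_job_timedout() matches trace an escalating recovery ladder: capture a coredump while the hang state is still intact, try soft recovery, then a per-ring reset, and only then full device recovery. A condensed sketch of that control flow, with the device-unplug check, task-info logging, and reset bookkeeping elided (the scheduler status constants follow one drm_sched revision and are an assumption):

/* Condensed sketch of amdgpu_job_timedout(); not the verbatim driver code. */
static enum drm_gpu_sched_stat timedout_sketch(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_reset_context reset_context = { 0 };

	/* 0. Dump first: the recovery steps below destroy the hang state. */
	amdgpu_job_core_dump(ring->adev, job);

	/* 1. Cheapest option: nudge the stuck wave along, no reset at all. */
	if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent))
		return DRM_GPU_SCHED_STAT_NOMINAL;

	/* 2. Reset only the hung ring, if the backend supports it. */
	if (!amdgpu_ring_reset(ring, job->vmid, job->hw_fence))
		return DRM_GPU_SCHED_STAT_NOMINAL;

	/* 3. Last resort: full device (or hive-wide) recovery. */
	amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}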
184 unsigned int num_ibs, struct amdgpu_job **job, in amdgpu_job_alloc() argument
193 *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL); in amdgpu_job_alloc()
194 if (!*job) in amdgpu_job_alloc()
202 (*job)->hw_fence = af; in amdgpu_job_alloc()
209 (*job)->hw_vm_fence = af; in amdgpu_job_alloc()
211 (*job)->vm = vm; in amdgpu_job_alloc()
213 amdgpu_sync_create(&(*job)->explicit_sync); in amdgpu_job_alloc()
214 (*job)->generation = amdgpu_vm_generation(adev, vm); in amdgpu_job_alloc()
215 (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET; in amdgpu_job_alloc()
220 return drm_sched_job_init(&(*job)->base, entity, 1, owner, in amdgpu_job_alloc()
224 kfree((*job)->hw_fence); in amdgpu_job_alloc()
226 kfree(*job); in amdgpu_job_alloc()
227 *job = NULL; in amdgpu_job_alloc()
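The allocation at line 193 uses struct_size() because struct amdgpu_job ends in a flexible array of IBs, so the header and num_ibs array entries come from a single overflow-checked kzalloc(). A minimal, self-contained illustration of the same pattern (demo_job and demo_ib are stand-ins, not driver types):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_ib {
	u64 gpu_addr;
	u32 length_dw;
};

struct demo_job {
	unsigned int num_ibs;
	struct demo_ib ibs[];	/* flexible array member, sized at alloc */
};

static struct demo_job *demo_alloc(unsigned int num_ibs)
{
	/* struct_size() computes sizeof(*job) + num_ibs * sizeof(ibs[0])
	 * with overflow checking; on overflow it returns SIZE_MAX so the
	 * allocation fails instead of silently under-allocating. */
	struct demo_job *job = kzalloc(struct_size(job, ibs, num_ibs),
				       GFP_KERNEL);

	if (job)
		job->num_ibs = num_ibs;
	return job;
}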
235 struct amdgpu_job **job, u64 k_job_id) in amdgpu_job_alloc_with_ib() argument
239 r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, in amdgpu_job_alloc_with_ib()
244 (*job)->num_ibs = 1; in amdgpu_job_alloc_with_ib()
245 r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]); in amdgpu_job_alloc_with_ib()
248 drm_sched_job_cleanup(&(*job)->base); in amdgpu_job_alloc_with_ib()
249 kfree((*job)->hw_vm_fence); in amdgpu_job_alloc_with_ib()
250 kfree((*job)->hw_fence); in amdgpu_job_alloc_with_ib()
251 kfree(*job); in amdgpu_job_alloc_with_ib()
252 *job = NULL; in amdgpu_job_alloc_with_ib()
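A hedged sketch of the usual calling pattern for these helpers, modeled on in-tree users (demo_run_ib is hypothetical, and passing 0 as the k_job_id tag is an assumption). Two ownership rules are visible in the listing: on allocation failure *job is reset to NULL so the caller has nothing to clean up, and after submission only the returned finished-fence reference still belongs to the caller:

static int demo_run_ib(struct amdgpu_device *adev,
		       struct drm_sched_entity *entity)
{
	struct amdgpu_job *job;
	struct dma_fence *f;
	int r;

	/* One job with a single 256-byte IB from the delayed pool. */
	r = amdgpu_job_alloc_with_ib(adev, entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     256, AMDGPU_IB_POOL_DELAYED,
				     &job, 0 /* k_job_id, assumed tag */);
	if (r)
		return r;	/* *job is NULL on failure */

	/* ... write packets into job->ibs[0].ptr and bump length_dw ... */

	f = amdgpu_job_submit(job);	/* job now belongs to the scheduler */
	r = dma_fence_wait(f, false);
	dma_fence_put(f);
	return r;
}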
258 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, in amdgpu_job_set_resources() argument
262 job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT; in amdgpu_job_set_resources()
263 job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT; in amdgpu_job_set_resources()
266 job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT; in amdgpu_job_set_resources()
267 job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT; in amdgpu_job_set_resources()
270 job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT; in amdgpu_job_set_resources()
271 job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT; in amdgpu_job_set_resources()
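All three base/size pairs are stored in page units (the >> PAGE_SHIFT shifts), and the gaps in the listing at lines 261, 265 and 269 presumably hold per-BO NULL checks, so callers can pass only the resources they actually use. The caller shape, reconstructed from the command-submission path in amdgpu_cs.c (treat the bo_list field names as an assumption):

/* Reconstructed caller; the gds/gws/oa BOs come from the submission's
 * BO list and any of them may be NULL. */
amdgpu_job_set_resources(job, bo_list->gds_obj,
			 bo_list->gws_obj, bo_list->oa_obj);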
275 void amdgpu_job_free_resources(struct amdgpu_job *job) in amdgpu_job_free_resources() argument
281 if (job->base.s_fence && job->base.s_fence->finished.ops) in amdgpu_job_free_resources()
282 f = &job->base.s_fence->finished; in amdgpu_job_free_resources()
283 else if (job->hw_fence && job->hw_fence->base.ops) in amdgpu_job_free_resources()
284 f = &job->hw_fence->base; in amdgpu_job_free_resources()
288 for (i = 0; i < job->num_ibs; ++i) in amdgpu_job_free_resources()
289 amdgpu_ib_free(&job->ibs[i], f); in amdgpu_job_free_resources()
294 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_free_cb() local
298 amdgpu_sync_free(&job->explicit_sync); in amdgpu_job_free_cb()
300 if (job->hw_fence->base.ops) in amdgpu_job_free_cb()
301 dma_fence_put(&job->hw_fence->base); in amdgpu_job_free_cb()
303 kfree(job->hw_fence); in amdgpu_job_free_cb()
304 if (job->hw_vm_fence->base.ops) in amdgpu_job_free_cb()
305 dma_fence_put(&job->hw_vm_fence->base); in amdgpu_job_free_cb()
307 kfree(job->hw_vm_fence); in amdgpu_job_free_cb()
309 kfree(job); in amdgpu_job_free_cb()
312 void amdgpu_job_set_gang_leader(struct amdgpu_job *job, in amdgpu_job_set_gang_leader() argument
317 WARN_ON(job->gang_submit); in amdgpu_job_set_gang_leader()
323 if (job != leader) in amdgpu_job_set_gang_leader()
325 job->gang_submit = fence; in amdgpu_job_set_gang_leader()
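Gang-submission context: every job in a gang stores the leader's scheduled fence so the whole gang is switched in together, and the job != leader guard at line 323 skips taking a reference on the leader's own scheduled fence, avoiding a self-reference; amdgpu_job_free() mirrors that asymmetry at line 335 by dropping gang_submit only when it is not the job's own fence. A hypothetical setup loop in the style of the amdgpu_cs path:

/* jobs[] and leader_idx are hypothetical locals; amdgpu_cs keeps the
 * equivalent state in its parser context. */
for (i = 0; i < num_jobs; ++i)
	amdgpu_job_set_gang_leader(jobs[i], jobs[leader_idx]);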
328 void amdgpu_job_free(struct amdgpu_job *job) in amdgpu_job_free() argument
330 if (job->base.entity) in amdgpu_job_free()
331 drm_sched_job_cleanup(&job->base); in amdgpu_job_free()
333 amdgpu_job_free_resources(job); in amdgpu_job_free()
334 amdgpu_sync_free(&job->explicit_sync); in amdgpu_job_free()
335 if (job->gang_submit != &job->base.s_fence->scheduled) in amdgpu_job_free()
336 dma_fence_put(job->gang_submit); in amdgpu_job_free()
338 if (job->hw_fence->base.ops) in amdgpu_job_free()
339 dma_fence_put(&job->hw_fence->base); in amdgpu_job_free()
341 kfree(job->hw_fence); in amdgpu_job_free()
342 if (job->hw_vm_fence->base.ops) in amdgpu_job_free()
343 dma_fence_put(&job->hw_vm_fence->base); in amdgpu_job_free()
345 kfree(job->hw_vm_fence); in amdgpu_job_free()
347 kfree(job); in amdgpu_job_free()
350 struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job) in amdgpu_job_submit() argument
354 drm_sched_job_arm(&job->base); in amdgpu_job_submit()
355 f = dma_fence_get(&job->base.s_fence->finished); in amdgpu_job_submit()
356 amdgpu_job_free_resources(job); in amdgpu_job_submit()
357 drm_sched_entity_push_job(&job->base); in amdgpu_job_submit()
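The ordering inside amdgpu_job_submit() matters: arm the job, take a finished-fence reference, free the IB resources, and only then push, because once drm_sched_entity_push_job() runs the scheduler may process and free the job at any moment. The caller's side of that contract, as a sketch:

struct dma_fence *f = amdgpu_job_submit(job);

/* `job` must not be touched past this point; only the fence reference
 * returned above still belongs to us. */
dma_fence_put(f);	/* drop it once the fence is no longer needed */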
362 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, in amdgpu_job_submit_direct() argument
367 job->base.sched = &ring->sched; in amdgpu_job_submit_direct()
368 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence); in amdgpu_job_submit_direct()
373 amdgpu_job_free(job); in amdgpu_job_submit_direct()
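amdgpu_job_submit_direct() bypasses the scheduler and drives amdgpu_ib_schedule() inline, which is what init- and reset-time paths need; note that on error the helper has already freed the job (line 373). A hedged usage sketch:

struct dma_fence *f = NULL;
int r;

/* Direct submission: no scheduler entity involved, so this is only safe
 * where the caller serializes access to the ring (init/reset paths). */
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
	return r;	/* job was already freed by the error path */

dma_fence_wait(f, false);
dma_fence_put(f);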
382 struct amdgpu_job *job = to_amdgpu_job(sched_job); in amdgpu_job_prepare_job() local
390 if (job->gang_submit) { in amdgpu_job_prepare_job()
391 fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit); in amdgpu_job_prepare_job()
396 fence = amdgpu_device_enforce_isolation(ring->adev, ring, job); in amdgpu_job_prepare_job()
400 if (job->vm && !job->vmid) { in amdgpu_job_prepare_job()
401 r = amdgpu_vmid_grab(job->vm, ring, job, &fence); in amdgpu_job_prepare_job()
412 dma_fence_set_error(&job->base.s_fence->finished, r); in amdgpu_job_prepare_job()
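amdgpu_job_prepare_job() is the scheduler's dependency hook: each invocation may hand back one fence the scheduler must wait on (the gang switch, enforced isolation, or the VMID grab above), and the scheduler keeps calling it until it returns NULL; failures are recorded on the job's finished fence (line 412) rather than returned. The generic shape of such a hook, with next_dependency() as a hypothetical helper:

static struct dma_fence *demo_prepare_job(struct drm_sched_job *sched_job,
					  struct drm_sched_entity *s_entity)
{
	struct dma_fence *fence = next_dependency(sched_job); /* hypothetical */

	if (fence)
		return fence;	/* scheduler waits on it, then calls again */

	return NULL;		/* no dependencies left: run_job may fire */
}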
421 struct amdgpu_job *job; in amdgpu_job_run() local
424 job = to_amdgpu_job(sched_job); in amdgpu_job_run()
425 finished = &job->base.s_fence->finished; in amdgpu_job_run()
427 trace_amdgpu_sched_run_job(job); in amdgpu_job_run()
429 /* Skip job if VRAM is lost and never resubmit gangs */ in amdgpu_job_run()
430 if (job->generation != amdgpu_vm_generation(adev, job->vm) || in amdgpu_job_run()
431 (job->job_run_counter && job->gang_submit)) in amdgpu_job_run()
438 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, in amdgpu_job_run()
446 job->job_run_counter++; in amdgpu_job_run()
447 amdgpu_job_free_resources(job); in amdgpu_job_run()
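The scheduler-facing functions in this listing plug into the DRM GPU scheduler as its backend callbacks. The binding table below is reconstructed from amdgpu_job.c; field names follow current drm_sched:

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_job_prepare_job,	/* dependency fences */
	.run_job = amdgpu_job_run,		/* push IBs to the ring */
	.timedout_job = amdgpu_job_timedout,	/* recovery ladder */
	.free_job = amdgpu_job_free_cb,		/* final job teardown */
};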