Lines matching refs: job
37 struct amdgpu_job *job) in amdgpu_job_do_core_dump() argument
48 amdgpu_coredump(adev, true, false, job); in amdgpu_job_do_core_dump()
52 struct amdgpu_job *job) in amdgpu_job_core_dump() argument
80 amdgpu_job_do_core_dump(tmp_adev, job); in amdgpu_job_core_dump()
91 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_timedout() local
112 amdgpu_job_core_dump(adev, job); in amdgpu_job_timedout()
116 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { in amdgpu_job_timedout()
123 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
126 ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid); in amdgpu_job_timedout()
140 r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence); in amdgpu_job_timedout()
169 r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context); in amdgpu_job_timedout()
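
The refs above trace the escalation path inside amdgpu_job_timedout(). A comment-annotated recap follows; the control flow connecting the three steps is paraphrased from context, not copied from the kernel:

	/* 1. Cheapest option: soft recovery, killing only the hung waves. */
	if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent))
		goto exit;	/* assumption: success skips any reset */

	/* 2. Otherwise, reset just this ring/queue. */
	r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence);

	/* 3. Last resort: full GPU recovery driven by a reset context. */
	if (r)
		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
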
186 unsigned int num_ibs, struct amdgpu_job **job, in amdgpu_job_alloc() argument
192 *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL); in amdgpu_job_alloc()
193 if (!*job) in amdgpu_job_alloc()
196 (*job)->vm = vm; in amdgpu_job_alloc()
198 amdgpu_sync_create(&(*job)->explicit_sync); in amdgpu_job_alloc()
199 (*job)->generation = amdgpu_vm_generation(adev, vm); in amdgpu_job_alloc()
200 (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET; in amdgpu_job_alloc()
205 return drm_sched_job_init(&(*job)->base, entity, 1, owner, in amdgpu_job_alloc()
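
A minimal caller-side sketch for the raw allocator; vm, entity, owner and num_ibs are assumed valid locals, and the final 0 argument is copied verbatim from the wrapper's internal call shown below:

	struct amdgpu_job *job;
	int r;

	r = amdgpu_job_alloc(adev, vm, entity, owner, num_ibs, &job, 0);
	if (r)
		return r;
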
212 struct amdgpu_job **job) in amdgpu_job_alloc_with_ib() argument
216 r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0); in amdgpu_job_alloc_with_ib()
220 (*job)->num_ibs = 1; in amdgpu_job_alloc_with_ib()
221 r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]); in amdgpu_job_alloc_with_ib()
224 drm_sched_job_cleanup(&(*job)->base); in amdgpu_job_alloc_with_ib()
225 kfree(*job); in amdgpu_job_alloc_with_ib()
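
Usage sketch for the one-IB convenience wrapper, assuming a valid adev and scheduler entity; the 256-byte size and the pool choice are illustrative, not mandated by the refs:

	struct amdgpu_job *job;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, entity, AMDGPU_FENCE_OWNER_UNDEFINED,
				     256, AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;
	/* job->ibs[0] is now ready to receive packets. */
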
231 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, in amdgpu_job_set_resources() argument
235 job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT; in amdgpu_job_set_resources()
236 job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT; in amdgpu_job_set_resources()
239 job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT; in amdgpu_job_set_resources()
240 job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT; in amdgpu_job_set_resources()
243 job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT; in amdgpu_job_set_resources()
244 job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT; in amdgpu_job_set_resources()
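
Caller-side sketch; judging by the guarded reads above, each BO argument appears to be optional, and all bases/sizes are stored in page units via the >> PAGE_SHIFT conversions:

	/* gds_bo/gws_bo/oa_bo are hypothetical locals; any may be NULL. */
	amdgpu_job_set_resources(job, gds_bo, gws_bo, oa_bo);
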
248 void amdgpu_job_free_resources(struct amdgpu_job *job) in amdgpu_job_free_resources() argument
254 if (job->base.s_fence && job->base.s_fence->finished.ops) in amdgpu_job_free_resources()
255 f = &job->base.s_fence->finished; in amdgpu_job_free_resources()
256 else if (job->hw_fence.base.ops) in amdgpu_job_free_resources()
257 f = &job->hw_fence.base; in amdgpu_job_free_resources()
261 for (i = 0; i < job->num_ibs; ++i) in amdgpu_job_free_resources()
262 amdgpu_ib_free(&job->ibs[i], f); in amdgpu_job_free_resources()
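
Recap of the fence selection driving the IB teardown above; the NULL fallback for a job that never produced either fence is inferred, not shown in the refs:

	struct dma_fence *f;

	if (job->base.s_fence && job->base.s_fence->finished.ops)
		f = &job->base.s_fence->finished;	/* scheduler fence exists */
	else if (job->hw_fence.base.ops)
		f = &job->hw_fence.base;		/* hardware fence only */
	else
		f = NULL;				/* inferred: job never ran */
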
267 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_free_cb() local
271 amdgpu_sync_free(&job->explicit_sync); in amdgpu_job_free_cb()
274 if (!job->hw_fence.base.ops) in amdgpu_job_free_cb()
275 kfree(job); in amdgpu_job_free_cb()
277 dma_fence_put(&job->hw_fence.base); in amdgpu_job_free_cb()
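
The kfree()/dma_fence_put() split above encodes an ownership rule worth spelling out (interpretation, not kernel text): the job is embedded in its hardware fence, so once the fence ops are set, the last fence reference owns the memory:

	if (!job->hw_fence.base.ops)
		kfree(job);			/* fence never initialised: plain free */
	else
		dma_fence_put(&job->hw_fence.base);	/* final put frees the job */
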
280 void amdgpu_job_set_gang_leader(struct amdgpu_job *job, in amdgpu_job_set_gang_leader() argument
285 WARN_ON(job->gang_submit); in amdgpu_job_set_gang_leader()
291 if (job != leader) in amdgpu_job_set_gang_leader()
293 job->gang_submit = fence; in amdgpu_job_set_gang_leader()
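
Hedged sketch of gang wiring modeled on these refs: each member stores the leader's scheduled fence so the whole gang is switched in together; jobs[], num_jobs and leader are hypothetical locals:

	unsigned int i;

	for (i = 0; i < num_jobs; ++i)
		amdgpu_job_set_gang_leader(jobs[i], leader);
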
296 void amdgpu_job_free(struct amdgpu_job *job) in amdgpu_job_free() argument
298 if (job->base.entity) in amdgpu_job_free()
299 drm_sched_job_cleanup(&job->base); in amdgpu_job_free()
301 amdgpu_job_free_resources(job); in amdgpu_job_free()
302 amdgpu_sync_free(&job->explicit_sync); in amdgpu_job_free()
303 if (job->gang_submit != &job->base.s_fence->scheduled) in amdgpu_job_free()
304 dma_fence_put(job->gang_submit); in amdgpu_job_free()
306 if (!job->hw_fence.base.ops) in amdgpu_job_free()
307 kfree(job); in amdgpu_job_free()
309 dma_fence_put(&job->hw_fence.base); in amdgpu_job_free()
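
Typical error-path usage, assuming the job was allocated but never pushed to the scheduler; one call unwinds the scheduler init, IBs and sync objects seen above:

	if (r) {
		amdgpu_job_free(job);	/* safe before submission */
		return r;
	}
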
312 struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job) in amdgpu_job_submit() argument
316 drm_sched_job_arm(&job->base); in amdgpu_job_submit()
317 f = dma_fence_get(&job->base.s_fence->finished); in amdgpu_job_submit()
318 amdgpu_job_free_resources(job); in amdgpu_job_submit()
319 drm_sched_entity_push_job(&job->base); in amdgpu_job_submit()
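
End-to-end sketch tying the pieces together; fill_my_ib() is a hypothetical packet writer, and per the dma_fence_get() ref above the returned finished fence carries a reference the caller must drop:

	struct amdgpu_job *job;
	struct dma_fence *f;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, entity, AMDGPU_FENCE_OWNER_UNDEFINED,
				     64, AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	fill_my_ib(&job->ibs[0]);	/* hypothetical */

	f = amdgpu_job_submit(job);	/* consumes the job */
	dma_fence_wait(f, false);
	dma_fence_put(f);
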
324 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, in amdgpu_job_submit_direct() argument
329 job->base.sched = &ring->sched; in amdgpu_job_submit_direct()
330 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence); in amdgpu_job_submit_direct()
335 amdgpu_job_free(job); in amdgpu_job_submit_direct()
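
Direct-submission sketch for paths that bypass the scheduler (early init or reset handling is an assumption); note from the amdgpu_job_free() ref above that the job is already freed internally on failure:

	struct dma_fence *fence;
	int r;

	r = amdgpu_job_submit_direct(job, ring, &fence);
	if (r)
		return r;	/* job already freed by the failure path */
	dma_fence_put(fence);
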
344 struct amdgpu_job *job = to_amdgpu_job(sched_job); in amdgpu_job_prepare_job() local
352 if (job->gang_submit) { in amdgpu_job_prepare_job()
353 fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit); in amdgpu_job_prepare_job()
358 fence = amdgpu_device_enforce_isolation(ring->adev, ring, job); in amdgpu_job_prepare_job()
362 if (job->vm && !job->vmid) { in amdgpu_job_prepare_job()
363 r = amdgpu_vmid_grab(job->vm, ring, job, &fence); in amdgpu_job_prepare_job()
374 dma_fence_set_error(&job->base.s_fence->finished, r); in amdgpu_job_prepare_job()
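
Annotated recap of the dependency sources in amdgpu_job_prepare_job(); the sequencing between them is paraphrased, and each returned fence is handed back to the scheduler to wait on before run_job:

	/* 1. Wait for the ring to switch over to this job's gang. */
	if (job->gang_submit)
		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);

	/* 2. Serialise against other clients when isolation is enforced. */
	fence = amdgpu_device_enforce_isolation(ring->adev, ring, job);

	/* 3. Grab a hardware VMID for VM jobs that do not have one yet. */
	if (job->vm && !job->vmid)
		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
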
383 struct amdgpu_job *job; in amdgpu_job_run() local
386 job = to_amdgpu_job(sched_job); in amdgpu_job_run()
387 finished = &job->base.s_fence->finished; in amdgpu_job_run()
389 trace_amdgpu_sched_run_job(job); in amdgpu_job_run()
392 if (job->generation != amdgpu_vm_generation(adev, job->vm) || in amdgpu_job_run()
393 (job->job_run_counter && job->gang_submit)) in amdgpu_job_run()
400 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, in amdgpu_job_run()
408 job->job_run_counter++; in amdgpu_job_run()
409 amdgpu_job_free_resources(job); in amdgpu_job_run()
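
How the callbacks in this listing plug into the DRM scheduler: the ops table below is reconstructed from the function names above and the known shape of struct drm_sched_backend_ops, so treat it as a sketch rather than the file's verbatim definition:

	static const struct drm_sched_backend_ops amdgpu_sched_ops = {
		.prepare_job = amdgpu_job_prepare_job,
		.run_job = amdgpu_job_run,
		.timedout_job = amdgpu_job_timedout,
		.free_job = amdgpu_job_free_cb,
	};
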