/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
#include "amdgpu_dev_coredump.h"
#include "amdgpu_xgmi.h"

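/**
 * amdgpu_job_do_core_dump - dump the IP state of one device and coredump it
 * @adev: amdgpu device to dump
 * @job: job that triggered the dump
 *
 * Invoke the dump_ip_state callback of every IP block that provides one and
 * then generate a devcoredump for @adev with @job attached to it.
 */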
static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
				    struct amdgpu_job *job)
{
	int i;

	dev_info(adev->dev, "Dumping IP State\n");
	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->funcs->dump_ip_state)
			adev->ip_blocks[i].version->funcs
				->dump_ip_state((void *)adev);
	dev_info(adev->dev, "Dumping IP State Completed\n");

	amdgpu_coredump(adev, true, false, job);
}

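/**
 * amdgpu_job_core_dump - coredump all devices affected by a job timeout
 * @adev: amdgpu device where the job timed out
 * @job: job that timed out
 *
 * Build the list of devices to dump - the whole XGMI hive when @adev is part
 * of one, otherwise just @adev itself - and take a core dump of each device,
 * with the device that hit the timeout handled first.
 */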
static void amdgpu_job_core_dump(struct amdgpu_device *adev,
				 struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	struct amdgpu_hive_info *hive = NULL;

	if (!amdgpu_sriov_vf(adev))
		hive = amdgpu_get_xgmi_hive(adev);
	if (hive)
		mutex_lock(&hive->hive_lock);
	/*
	 * Reuse the logic in amdgpu_device_gpu_recover() to build the list of
	 * devices for the core dump
	 */
	INIT_LIST_HEAD(&device_list);
	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* Do the coredump for each device */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list)
		amdgpu_job_do_core_dump(tmp_adev, job);

	if (hive) {
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}
}

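/**
 * amdgpu_job_timedout - scheduler callback for a job timeout
 * @s_job: job that hit its timeout
 *
 * Take a core dump of the current error state, then try the recovery
 * methods from least to most invasive: soft recovery, a per-ring reset and
 * finally a full GPU reset.
 */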
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info *ti;
	struct amdgpu_device *adev = ring->adev;
	int idx;
	int r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		dev_info(adev->dev, "%s - device unplugged, skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	adev->job_hang = true;

	/*
	 * Do the coredump immediately after the job timeout to get a very
	 * close dump/snapshot/representation of the GPU's current error
	 * status. Skip it for SRIOV, since the VF FLR will be triggered by
	 * the host driver before the job timeout.
	 */
	if (!amdgpu_sriov_vf(adev))
		amdgpu_job_core_dump(adev, job);

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
			s_job->sched->name);
		goto exit;
	}

	dev_err(adev->dev, "ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		ring->fence_drv.sync_seq);

	ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
	if (ti) {
		dev_err(adev->dev,
			"Process information: process %s pid %d thread %s pid %d\n",
			ti->process_name, ti->tgid, ti->task_name, ti->pid);
		amdgpu_vm_put_task_info(ti);
	}

	dma_fence_set_error(&s_job->s_fence->finished, -ETIME);

	/* Attempt a per-ring reset */
	if (amdgpu_gpu_recovery &&
	    ring->funcs->reset) {
		/*
		 * Stop the scheduler, but don't mess with the bad job yet
		 * because if the ring reset fails we'll fall back to a full
		 * GPU reset.
		 */
		drm_sched_wqueue_stop(&ring->sched);
		r = amdgpu_ring_reset(ring, job->vmid);
		if (!r) {
			if (amdgpu_ring_sched_ready(ring))
				drm_sched_stop(&ring->sched, s_job);
			atomic_inc(&ring->adev->gpu_reset_counter);
			amdgpu_fence_driver_force_completion(ring);
			if (amdgpu_ring_sched_ready(ring))
				drm_sched_start(&ring->sched);
			goto exit;
		}
	}

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		reset_context.src = AMDGPU_RESET_SRC_JOB;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		/*
		 * Skip the coredump during recovery to avoid an unnecessary
		 * extra one; we already captured a very close representation
		 * of the GPU's error status above.
		 */
		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
		if (r)
			dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	adev->job_hang = false;
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

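/**
 * amdgpu_job_alloc - allocate an amdgpu job
 * @adev: amdgpu device
 * @vm: optional VM the job will run in
 * @entity: optional scheduler entity the job will be submitted to
 * @owner: job owner, used for debugging
 * @num_ibs: number of IBs the job will contain
 * @job: resulting amdgpu job
 *
 * Allocate and initialize the job structure; when @entity is given, the
 * underlying scheduler job is initialized as well.
 */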
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct drm_sched_entity *entity, void *owner,
		     unsigned int num_ibs, struct amdgpu_job **job)
{
	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;

	amdgpu_sync_create(&(*job)->explicit_sync);
	(*job)->generation = amdgpu_vm_generation(adev, vm);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	if (!entity)
		return 0;

	return drm_sched_job_init(&(*job)->base, entity, 1, owner);
}

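/**
 * amdgpu_job_alloc_with_ib - allocate a job together with a single IB
 * @adev: amdgpu device
 * @entity: optional scheduler entity the job will be submitted to
 * @owner: job owner, used for debugging
 * @size: size of the IB in bytes
 * @pool_type: IB pool to allocate from
 * @job: resulting amdgpu job
 *
 * Convenience wrapper around amdgpu_job_alloc() for jobs consisting of
 * exactly one IB.
 */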
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
			     struct drm_sched_entity *entity, void *owner,
			     size_t size, enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
	if (r)
		return r;

	(*job)->num_ibs = 1;
	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r) {
		if (entity)
			drm_sched_job_cleanup(&(*job)->base);
		kfree(*job);
	}

	return r;
}

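/**
 * amdgpu_job_set_resources - record GDS, GWS and OA resources in a job
 * @job: job to update
 * @gds: optional GDS buffer object
 * @gws: optional GWS buffer object
 * @oa: optional OA buffer object
 *
 * Store the base offset and size (in pages) of each given buffer object in
 * the job so they can be programmed when the job runs.
 */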
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
			      struct amdgpu_bo *gws, struct amdgpu_bo *oa)
{
	if (gds) {
		job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}
}

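/**
 * amdgpu_job_free_resources - free the IBs of a job
 * @job: job to clean up
 *
 * Release all IBs of the job, using the finished fence (or the hw fence as
 * a fallback) to delay freeing the IB memory until the job has completed.
 */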
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	unsigned i;

	/* Check if any fences were initialized */
	if (job->base.s_fence && job->base.s_fence->finished.ops)
		f = &job->base.s_fence->finished;
	else if (job->hw_fence.ops)
		f = &job->hw_fence;
	else
		f = NULL;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

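/**
 * amdgpu_job_free_cb - scheduler free_job callback
 * @s_job: scheduler job to free
 *
 * Clean up the scheduler job and release the job memory, either directly or
 * through the reference held by the embedded hw fence.
 */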
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->explicit_sync);

	/* Only put the hw fence if the job has an embedded fence */
	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}

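/**
 * amdgpu_job_set_gang_leader - set the gang leader of a job
 * @job: job that is part of the gang
 * @leader: gang leader job
 *
 * Track the gang leader's scheduled fence in @job; no extra reference is
 * taken when the job is the leader itself, to avoid a circular dependency.
 */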
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
				struct amdgpu_job *leader)
{
	struct dma_fence *fence = &leader->base.s_fence->scheduled;

	WARN_ON(job->gang_submit);

	/*
	 * Don't add a reference when we are the gang leader to avoid a
	 * circular dependency.
	 */
	if (job != leader)
		dma_fence_get(fence);
	job->gang_submit = fence;
}

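/**
 * amdgpu_job_free - clean up and free an amdgpu job
 * @job: job to free
 *
 * Clean up the scheduler job if it was initialized, drop the gang submit
 * fence and release the job memory, either directly or through the
 * reference held by the embedded hw fence.
 */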
void amdgpu_job_free(struct amdgpu_job *job)
{
	if (job->base.entity)
		drm_sched_job_cleanup(&job->base);

	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->explicit_sync);
	if (job->gang_submit != &job->base.s_fence->scheduled)
		dma_fence_put(job->gang_submit);

	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}

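/**
 * amdgpu_job_submit - push a job to its scheduler entity
 * @job: job to submit
 *
 * Arm the scheduler job and push it to the entity it was allocated for.
 *
 * Returns: the finished fence of the job, with a reference held for the
 * caller.
 */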
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
{
	struct dma_fence *f;

	drm_sched_job_arm(&job->base);
	f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return f;
}

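/**
 * amdgpu_job_submit_direct - submit a job directly to a ring
 * @job: job to submit
 * @ring: ring to execute the job on
 * @fence: resulting hardware fence
 *
 * Bypass the scheduler and schedule the IBs of the job on @ring
 * immediately. The job is freed on success.
 */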
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);

	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

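/**
 * amdgpu_job_prepare_job - scheduler prepare_job callback
 * @sched_job: job to prepare
 * @s_entity: entity the job is queued on
 *
 * Return the next fence the job still has to wait for before it can run:
 * first the gang switch fence, then a fence for grabbing a VM ID. Returns
 * NULL when the job is ready to run, or when an error was set on it.
 */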
static struct dma_fence *
amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
		       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct dma_fence *fence = NULL;
	int r;

	r = drm_sched_entity_error(s_entity);
	if (r)
		goto error;

	if (!fence && job->gang_submit)
		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);

	while (!fence && job->vm && !job->vmid) {
		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
		if (r) {
			dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
			goto error;
		}
	}

	return fence;

error:
	dma_fence_set_error(&job->base.s_fence->finished, r);
	return NULL;
}

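/**
 * amdgpu_job_run - scheduler run_job callback
 * @sched_job: job to run
 *
 * Schedule the IBs of the job on the hardware ring, unless the VM
 * generation changed (e.g. VRAM was lost) since the job was created, or a
 * gang submission would have to be resubmitted; in those cases the job is
 * skipped with -ECANCELED.
 *
 * Returns: the hardware fence of the submission, or an error pointer.
 */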
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	trace_amdgpu_sched_run_job(job);

	/* Skip job if VRAM is lost and never resubmit gangs */
	if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
	    (job->job_run_counter && job->gang_submit))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
			ring->name);
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			dev_err(adev->dev,
				"Error scheduling IBs (%d) in ring(%s)", r,
				ring->name);
	}

	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

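/**
 * amdgpu_job_stop_all_jobs_on_sched - fail all jobs of a scheduler
 * @sched: scheduler to drain
 *
 * Signal all jobs still queued on the scheduler's run queues as well as all
 * jobs already pushed to the hardware, setting -EHWPOISON as their error.
 */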
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		struct drm_sched_rq *rq = sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_job_prepare_job,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};