/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info *ti;
	struct amdgpu_device *adev = ring->adev;
	int idx;
	int r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	adev->job_hang = true;

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
			s_job->sched->name);
		goto exit;
	}

	dev_err(adev->dev, "ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		ring->fence_drv.sync_seq);

	ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
	if (ti) {
		dev_err(adev->dev,
			"Process information: process %s pid %d thread %s pid %d\n",
			ti->process_name, ti->tgid, ti->task_name, ti->pid);
		amdgpu_vm_put_task_info(ti);
	}

	dma_fence_set_error(&s_job->s_fence->finished, -ETIME);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		reset_context.src = AMDGPU_RESET_SRC_JOB;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
		if (r)
			dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	adev->job_hang = false;
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

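/**
 * amdgpu_job_alloc - allocate a job with space for @num_ibs IBs
 * @adev: amdgpu device the job will run on
 * @vm: optional VM the job executes in
 * @entity: optional scheduler entity the job will be pushed to
 * @owner: owner cookie forwarded to drm_sched_job_init()
 * @num_ibs: number of IBs to reserve, must be non-zero
 * @job: points to the allocated job on success
 *
 * When @entity is NULL the scheduler job is not initialized and the
 * caller is expected to submit the job directly.
 */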
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct drm_sched_entity *entity, void *owner,
		     unsigned int num_ibs, struct amdgpu_job **job)
{
	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;

	amdgpu_sync_create(&(*job)->explicit_sync);
	(*job)->generation = amdgpu_vm_generation(adev, vm);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	if (!entity)
		return 0;

	return drm_sched_job_init(&(*job)->base, entity, 1, owner);
}

int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
			     struct drm_sched_entity *entity, void *owner,
			     size_t size, enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
	if (r)
		return r;

	(*job)->num_ibs = 1;
	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r) {
		if (entity)
			drm_sched_job_cleanup(&(*job)->base);
		kfree(*job);
	}

	return r;
}

void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
			      struct amdgpu_bo *gws, struct amdgpu_bo *oa)
{
	if (gds) {
		job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}
}

void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	unsigned i;

	/* Check if any fences were initialized */
	if (job->base.s_fence && job->base.s_fence->finished.ops)
		f = &job->base.s_fence->finished;
	else if (job->hw_fence.ops)
		f = &job->hw_fence;
	else
		f = NULL;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->explicit_sync);

	/* Only put the hw fence if the job has an embedded fence */
	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}

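/**
 * amdgpu_job_set_gang_leader - mark a job as part of a gang submission
 * @job: job to attach to the gang
 * @leader: gang leader whose scheduled fence the job should track
 *
 * Stores the leader's scheduled fence in @job->gang_submit, which
 * amdgpu_job_prepare_job() later uses to switch to the gang before the
 * job runs. Only non-leader members take a fence reference; the leader
 * referencing its own fence would create a circular dependency.
 */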
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
				struct amdgpu_job *leader)
{
	struct dma_fence *fence = &leader->base.s_fence->scheduled;

	WARN_ON(job->gang_submit);

	/*
	 * Don't add a reference when we are the gang leader to avoid a
	 * circular dependency.
	 */
	if (job != leader)
		dma_fence_get(fence);
	job->gang_submit = fence;
}

void amdgpu_job_free(struct amdgpu_job *job)
{
	if (job->base.entity)
		drm_sched_job_cleanup(&job->base);

	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->explicit_sync);
	if (job->gang_submit != &job->base.s_fence->scheduled)
		dma_fence_put(job->gang_submit);

	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}

struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
{
	struct dma_fence *f;

	drm_sched_job_arm(&job->base);
	f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return f;
}

int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);
	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

static struct dma_fence *
amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
		       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct dma_fence *fence = NULL;
	int r;

	r = drm_sched_entity_error(s_entity);
	if (r)
		goto error;

	if (!fence && job->gang_submit)
		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);

	while (!fence && job->vm && !job->vmid) {
		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
		if (r) {
			dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
			goto error;
		}
	}

	return fence;

error:
	dma_fence_set_error(&job->base.s_fence->finished, r);
	return NULL;
}

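/*
 * Scheduler backend callback that actually pushes the job's IBs to the
 * hardware ring. The job is cancelled with -ECANCELED instead of run when
 * the VM generation has changed (e.g. VRAM was lost) or when a gang
 * submission would be resubmitted.
 */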
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	trace_amdgpu_sched_run_job(job);

	/* Skip job if VRAM is lost and never resubmit gangs */
	if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
	    (job->job_run_counter && job->gang_submit))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
			ring->name);
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			dev_err(adev->dev,
				"Error scheduling IBs (%d) in ring(%s)", r,
				ring->name);
	}

	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		struct drm_sched_rq *rq = sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_job_prepare_job,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};
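
/*
 * Illustrative sketch of the job lifecycle built on the helpers above
 * (not part of the driver): allocate a job with a single IB, fill the IB,
 * then hand the job to the scheduler. Error paths are trimmed, the IB size
 * of 64 bytes is arbitrary, and "entity" stands in for an already
 * initialized drm_sched_entity.
 *
 *	struct amdgpu_job *job;
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_job_alloc_with_ib(adev, &entity, AMDGPU_FENCE_OWNER_UNDEFINED,
 *				     64, AMDGPU_IB_POOL_DELAYED, &job);
 *	if (r)
 *		return r;
 *
 *	... write packets into job->ibs[0] here ...
 *
 *	fence = amdgpu_job_submit(job);	// returns a reference to the finished fence
 *	dma_fence_wait(fence, false);	// optionally wait for completion
 *	dma_fence_put(fence);		// drop the reference taken by submit
 */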