/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_trace.h"

#define AMDGPU_IB_TEST_TIMEOUT		msecs_to_jiffies(1000)
#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored. You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them. Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer
 * @size: requested IB size
 * @pool_type: IB pool type (delayed, immediate, direct)
 * @ib: IB object returned
 *
 * Request an IB (all asics). IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned int size, enum amdgpu_ib_pool_type pool_type,
		  struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
				     &ib->sa_bo, size);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
		/* flush the cache before committing the IB */
		ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}
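
/*
 * Illustrative sketch only (not used by the driver): roughly what the
 * per-ring IB tests do with this API.  The packet written below is a
 * placeholder; real callers fill ib.ptr with engine specific packets
 * before submission, and error handling is trimmed for brevity.
 *
 *	struct amdgpu_ib ib = {};
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
 *	if (!r) {
 *		ib.ptr[0] = ring->funcs->nop;	// placeholder packet
 *		ib.length_dw = 1;
 *		r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *		if (!r)
 *			dma_fence_wait(f, false);
 *		amdgpu_ib_free(&ib, f);
 *		dma_fence_put(f);
 *	}
 */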

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @ib: IB object to free
 * @f: the fence the SA BO needs to wait on before the IB memory is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f)
{
	amdgpu_sa_bo_free(&ib->sa_bo, f);
}

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job to schedule
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	struct amdgpu_fence *af;
	bool need_ctx_switch;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned int fence_flags = 0;
	bool secure, init_shadow;
	u64 shadow_va, csa_va, gds_va;
	int vmid = AMDGPU_JOB_GET_VMID(job);
	bool need_pipe_sync = false;
	unsigned int cond_exec;
	unsigned int i;
	int r = 0;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence ?
			job->base.s_fence->finished.context : 0;
		shadow_va = job->shadow_va;
		csa_va = job->csa_va;
		gds_va = job->gds_va;
		init_shadow = job->init_shadow;
		af = job->hw_fence;
		/* Save the context of the job for reset handling.
		 * The driver needs this so it can skip the ring
		 * contents for guilty contexts.
		 */
		af->context = fence_ctx;
		/* the vm fence is also part of the job's context */
		job->hw_vm_fence->context = fence_ctx;
	} else {
		vm = NULL;
		fence_ctx = 0;
		shadow_va = 0;
		csa_va = 0;
		gds_va = 0;
		init_shadow = false;
		af = kzalloc_obj(*af, GFP_ATOMIC);
		if (!af)
			return -ENOMEM;
	}

	if (!ring->sched.ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		r = -EINVAL;
		goto free_fence;
	}

	if (vm && !job->vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		r = -EINVAL;
		goto free_fence;
	}

	if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
	    (!ring->funcs->secure_submission_supported)) {
		dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
		r = -EINVAL;
		goto free_fence;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		goto free_fence;
	}

	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
	     need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {

		need_pipe_sync = true;

		if (tmp)
			trace_amdgpu_ib_pipe_sync(job, tmp);

		dma_fence_put(tmp);
	}

	if (job)
		amdgpu_vm_flush(ring, job, need_pipe_sync);

	amdgpu_ring_ib_begin(ring);

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
		ring->funcs->emit_mem_sync(ring);

	if (ring->funcs->emit_wave_limit &&
	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
		ring->funcs->emit_wave_limit(ring, true);

	if (ring->funcs->emit_gfx_shadow && adev->gfx.cp_gfx_shadow)
		amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va,
					    init_shadow, vmid);

	if (ring->funcs->init_cond_exec)
		cond_exec = amdgpu_ring_init_cond_exec(ring,
						       ring->cond_exe_gpu_addr);

	amdgpu_device_flush_hdp(adev, ring);

	if (need_ctx_switch)
		status |= AMDGPU_HAVE_CTX_SWITCH;

	if (job && ring->funcs->emit_cntxcntl) {
		status |= job->preamble_status;
		status |= job->preemption_status;
		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	/* Setup initial TMZiness and send it off.
	 */
	secure = false;
	if (job && ring->funcs->emit_frame_cntl) {
		secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
		amdgpu_ring_emit_frame_cntl(ring, true, secure);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		if (job && ring->funcs->emit_frame_cntl) {
			if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
				amdgpu_ring_emit_frame_cntl(ring, false, secure);
				secure = !secure;
				amdgpu_ring_emit_frame_cntl(ring, true, secure);
			}
		}

		amdgpu_ring_emit_ib(ring, job, ib, status);
		status &= ~AMDGPU_HAVE_CTX_SWITCH;
	}

	if (job && ring->funcs->emit_frame_cntl)
		amdgpu_ring_emit_frame_cntl(ring, false, secure);

	amdgpu_device_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec &&
	    adev->gfx.cp_gfx_shadow) {
		amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);
		amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
	}

	amdgpu_fence_emit(ring, af, fence_flags);
	*f = &af->base;
	/* get a ref for the job */
	if (job)
		dma_fence_get(*f);

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	amdgpu_ring_patch_cond_exec(ring, cond_exec);

	ring->current_ctx = fence_ctx;
	if (job && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);

	if (ring->funcs->emit_wave_limit &&
	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
		ring->funcs->emit_wave_limit(ring, false);

	amdgpu_ring_ib_end(ring);
	/* Save the wptr associated with this fence.
	 * This must be last for resets to work properly
	 * as we need to save the wptr associated with this
	 * fence so we know what ring contents to back up
	 * after we reset the queue.
	 */
	amdgpu_fence_save_wptr(af);

	amdgpu_ring_commit(ring);

	return 0;

free_fence:
	if (!job)
		kfree(af);
	return r;
}

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r, i;

	if (adev->ib_pool_ready)
		return 0;

	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
		r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
					      AMDGPU_IB_POOL_SIZE, 256,
					      AMDGPU_GEM_DOMAIN_GTT);
		if (r)
			goto error;
	}
	adev->ib_pool_ready = true;

	return 0;

error:
	while (i--)
		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
	return r;
}
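
/*
 * Note: amdgpu_ib_pool_init() and amdgpu_ib_pool_fini() are meant to be
 * paired once per device (typically from the common device init and
 * teardown paths); the ib_pool_ready flag keeps a repeated init call from
 * re-creating the suballocators.
 */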

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->ib_pool_ready)
		return;

	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
	adev->ib_pool_ready = false;
}

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	long tmo_gfx, tmo_mm;
	int r, ret = 0;
	unsigned int i;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* MM engines on the hypervisor side are not scheduled together
		 * with the CP and SDMA engines, so even in exclusive mode an MM
		 * engine could still be running on another VF. The IB test
		 * timeout for MM engines under SR-IOV therefore has to be set
		 * to a long time; 8 seconds should be enough for the MM engine
		 * to come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so the
		 * timeout needs to be wide enough to cover the time spent
		 * waiting for them to come back under RUNTIME only.
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	} else if (adev->gmc.xgmi.hive_id) {
		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
	}

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		/* KIQ rings don't have an IB test because we never submit IBs
		 * to them and they have no interrupt support.
		 */
		if (!ring->sched.ready || !ring->funcs->test_ib)
			continue;

		if (adev->enable_mes &&
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (!r) {
			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
				      ring->name);
			continue;
		}

		ring->sched.ready = false;
		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
			      ring->name, r);

		if (ring == &adev->gfx.gfx_ring[0]) {
			/* oh, oh, that's really bad */
			adev->accel_working = false;
			return r;

		} else {
			ret = r;
		}
	}
	return ret;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;

	seq_puts(m, "--------------------- DELAYED ---------------------\n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
				     m);
	seq_puts(m, "-------------------- IMMEDIATE --------------------\n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
				     m);
	seq_puts(m, "--------------------- DIRECT ----------------------\n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_sa_info);

#endif
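
/*
 * amdgpu_debugfs_sa_init - register the suballocator debugfs file
 *
 * When CONFIG_DEBUG_FS is enabled, this creates the read-only
 * "amdgpu_sa_info" entry on the primary DRM minor, dumping the state of
 * the three IB suballocator pools. With debugfs mounted in the usual
 * place it can typically be read with something like (the minor number
 * depends on the system):
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_sa_info
 */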
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_sa_info", 0444, root, adev,
			    &amdgpu_debugfs_sa_info_fops);

#endif
}