/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers. Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written. When the
 * pointers are equal, the ring is idle. When the host
 * writes commands to the ring buffer, it increments the
 * wptr. The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */

/**
 * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission.
 *
 * @type: ring type for which to return the limit.
 */
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type)
{
	switch (type) {
	case AMDGPU_RING_TYPE_GFX:
		/* Need to keep at least 192 on GFX7+ for old radv. */
		return 192;
	case AMDGPU_RING_TYPE_COMPUTE:
		return 125;
	case AMDGPU_RING_TYPE_VCN_JPEG:
		return 16;
	default:
		return 49;
	}
}

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
{
	/* Align requested size with padding so amdgpu_ring_commit()
	 * can pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}
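/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file): callers pair amdgpu_ring_alloc() with amdgpu_ring_commit(),
 * or with amdgpu_ring_undo() on failure. The dword count and packet
 * contents below are placeholders:
 *
 *	r = amdgpu_ring_alloc(ring, 4);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, ring->funcs->nop);	<-- emit real packets here
 *	amdgpu_ring_commit(ring);			<-- bump wptr, GPU starts fetching
 */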
/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	uint32_t occupied, chunk1, chunk2;

	occupied = ring->wptr & ring->buf_mask;
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count) ? count : chunk1;
	chunk2 = count - chunk1;

	if (chunk1)
		memset32(&ring->ring[occupied], ring->funcs->nop, chunk1);

	if (chunk2)
		memset32(ring->ring, ring->funcs->nop, chunk2);

	ring->wptr += count;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count;
}
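/*
 * Worked example of the wrap-around split above (numbers are
 * illustrative): with a 1024-dword ring (buf_mask = 0x3ff),
 * wptr = 1020 and count = 10, occupied = 1020, so
 * chunk1 = 1024 - 1020 = 4 NOPs fill the tail of the buffer and
 * chunk2 = 10 - 4 = 6 NOPs continue at offset 0.
 */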
/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	if (ring->count_dw < 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count &= ring->funcs->align_mask;

	if (count != 0)
		ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}
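/*
 * Worked example of the padding math in amdgpu_ring_commit()
 * (numbers are illustrative): with align_mask = 7 (8-dword fetch
 * granularity) and wptr & 7 == 3, count = 8 - 3 = 5 NOPs are
 * inserted; if the wptr is already aligned, count = 8 and
 * 8 & 7 == 0, so no padding is emitted.
 */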
/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

#define amdgpu_ring_get_gpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) :			\
	 (ring->adev->wb.gpu_addr + offset * 4))

#define amdgpu_ring_get_cpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) :	\
	 (&ring->adev->wb.wb[offset]))
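/*
 * Note on the two macros above (explanatory, not from the original
 * source): for regular rings the writeback (wb) area is an array of
 * 32-bit slots, so a slot index is scaled by 4 to form the GPU byte
 * address, while the CPU side simply indexes the u32 array. MES
 * queues instead carve both addresses out of their per-context
 * meta-data buffer, where @offset is already a byte offset.
 */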
/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score)
{
	int r;
	int sched_hw_submission = amdgpu_sched_hw_submission;
	u32 *num_sched;
	u32 hw_ip;
	unsigned int max_ibs_dw;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	if (ring->funcs->type == AMDGPU_RING_TYPE_MES)
		sched_hw_submission = 8;
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->num_hw_submission = sched_hw_submission;
		ring->sched_score = sched_score;
		ring->vmid_wait = dma_fence_get_stub();

		if (!ring->is_mes_queue) {
			ring->idx = adev->num_rings++;
			adev->rings[ring->idx] = ring;
		}

		r = amdgpu_fence_driver_init_ring(ring);
		if (r)
			return r;
	}

	if (ring->is_mes_queue) {
		ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_RPTR_OFFS);
		ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_WPTR_OFFS);
		ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_FENCE_OFFS);
		ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
		ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_COND_EXE_OFFS);
	} else {
		r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
			return r;
		}
	}

	ring->fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
	ring->fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);

	ring->rptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
	ring->rptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);

	ring->wptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
	ring->wptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);

	ring->trail_fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
	ring->trail_fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);

	ring->cond_exe_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
	ring->cond_exe_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);

	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
		r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
		if (r) {
			dev_err(adev->dev, "failed initializing fences (%d).\n", r);
			return r;
		}

		max_ibs_dw = ring->funcs->emit_frame_size +
			     amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
		max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

		if (WARN_ON(max_ibs_dw > max_dw))
			max_dw = max_ibs_dw;

		ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
	} else {
		ring->ring_size = roundup_pow_of_two(max_dw * 4);
		ring->count_dw = (ring->ring_size - 4) >> 2;
		/* ring buffer is empty now */
		ring->wptr = *ring->rptr_cpu_addr = 0;
	}

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;

	/* Allocate ring buffer */
	if (ring->is_mes_queue) {
		int offset = 0;

		BUG_ON(ring->ring_size > PAGE_SIZE*4);

		offset = amdgpu_mes_ctx_get_offs(ring,
						 AMDGPU_MES_CTX_RING_OFFS);
		ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		amdgpu_ring_clear_ring(ring);

	} else if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->hw_prio = hw_prio;

	if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
		hw_ip = ring->funcs->type;
		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
			&ring->sched;
	}

	return 0;
}
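/*
 * Worked example of the ring sizing in amdgpu_ring_init() (numbers
 * are illustrative): with max_dw = 1024 and sched_hw_submission = 2,
 * the buffer must hold 1024 * 4 * 2 = 8192 bytes, which
 * roundup_pow_of_two() keeps at 8192; buf_mask then becomes
 * 8192 / 4 - 1 = 0x7ff, i.e. 2048 addressable dwords in the ring.
 */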
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	/* Don't tear down a ring that was never initialized */
	if (!(ring->adev) ||
	    (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
		return;

	ring->sched.ready = false;

	if (!ring->is_mes_queue) {
		amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
		amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

		amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
		amdgpu_device_wb_free(ring->adev, ring->fence_offs);

		amdgpu_bo_free_kernel(&ring->ring_obj,
				      &ring->gpu_addr,
				      (void **)&ring->ring);
	} else {
		kfree(ring->fence_drv.fences);
	}

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;

	if (!ring->is_mes_queue)
		ring->adev->rings[ring->idx] = NULL;
}

/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timed out fence
 *
 * Tries to get a ring proceeding again when it is stuck.
 */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	unsigned long flags;
	ktime_t deadline;

	if (unlikely(ring->adev->debug_disable_soft_recovery))
		return false;

	deadline = ktime_add_us(ktime_get(), 10000);

	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	spin_lock_irqsave(fence->lock, flags);
	if (!dma_fence_is_signaled_locked(fence))
		dma_fence_set_error(fence, -ENODATA);
	spin_unlock_irqrestore(fence->lock, flags);

	atomic_inc(&ring->adev->gpu_reset_counter);
	while (!dma_fence_is_signaled(fence) &&
	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
		ring->funcs->soft_recovery(ring, vmid);

	return dma_fence_is_signaled(fence);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
 */
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	uint32_t value, result, early[3];
	uint64_t p;
	loff_t i;
	int r;

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
			mutex_lock(&ring->adev->cper.ring_lock);

		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r) {
				result = r;
				goto out;
			}
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
		while (size) {
			if (*pos >= (ring->ring_size + 12))
				return result;

			value = ring->ring[(*pos - 12)/4];
			r = put_user(value, (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	} else {
		p = early[0];
		if (early[0] <= early[1])
			size = (early[1] - early[0]);
		else
			size = ring->ring_size - (early[0] - early[1]);

		while (size) {
			if (p == early[1])
				goto out;

			value = ring->ring[p];
			r = put_user(value, (uint32_t *)buf);
			if (r) {
				result = r;
				goto out;
			}

			buf += 4;
			result += 4;
			size--;
			p++;
			p &= ring->ptr_mask;
		}
	}

out:
	if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
		mutex_unlock(&ring->adev->cper.ring_lock);

	return result;
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};
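/*
 * Example (illustrative; the DRM minor number and ring name vary by
 * system): the 12-byte header and the first ring dwords can be
 * inspected from userspace with
 *
 *	od -An -tx4 -N 32 /sys/kernel/debug/dri/0/amdgpu_ring_gfx
 *
 * where the first three words are the rptr, the wptr and the
 * driver's copy of the wptr, as described in the layout comment above.
 */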
static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	volatile u32 *mqd;
	u32 *kbuf;
	int r, i;
	uint32_t value, result;

	if (*pos & 3 || size & 3)
		return -EINVAL;

	kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		goto err_free;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
	if (r)
		goto err_unreserve;

	/*
	 * Copy to local buffer to avoid put_user(), which might fault
	 * and acquire mmap_sem, under reservation_ww_class_mutex.
	 */
	for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
		kbuf[i] = mqd[i];

	amdgpu_bo_kunmap(ring->mqd_obj);
	amdgpu_bo_unreserve(ring->mqd_obj);

	result = 0;
	while (size) {
		if (*pos >= ring->mqd_size)
			break;

		value = kbuf[*pos/4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			goto err_free;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	kfree(kbuf);
	return result;

err_unreserve:
	amdgpu_bo_unreserve(ring->mqd_obj);
err_free:
	kfree(kbuf);
	return r;
}

static const struct file_operations amdgpu_debugfs_mqd_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_mqd_read,
	.llseek = default_llseek
};

static int amdgpu_debugfs_ring_error(void *data, u64 val)
{
	struct amdgpu_ring *ring = data;

	amdgpu_fence_driver_set_error(ring, val);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(amdgpu_debugfs_error_fops, NULL,
				amdgpu_debugfs_ring_error, "%lld\n");

#endif

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);
	debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
				 &amdgpu_debugfs_ring_fops,
				 ring->ring_size + 12);

	if (ring->mqd_obj) {
		sprintf(name, "amdgpu_mqd_%s", ring->name);
		debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
					 &amdgpu_debugfs_mqd_fops,
					 ring->mqd_size);
	}

	sprintf(name, "amdgpu_error_%s", ring->name);
	debugfs_create_file(name, 0200, root, ring,
			    &amdgpu_debugfs_error_fops);
#endif
}
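/*
 * Example of the resulting debugfs entries for one ring (illustrative;
 * the minor number and ring names depend on the ASIC and queue setup):
 *
 *	/sys/kernel/debug/dri/0/amdgpu_ring_gfx    (read-only ring dump)
 *	/sys/kernel/debug/dri/0/amdgpu_mqd_gfx     (read-only MQD dump)
 *	/sys/kernel/debug/dri/0/amdgpu_error_gfx   (write-only error injection)
 */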
/**
 * amdgpu_ring_test_helper - tests ring and set sched readiness status
 *
 * @ring: ring to test
 *
 * Test the ring and set the scheduler readiness status accordingly.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	ring->sched.ready = !r;

	return r;
}

static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
				    struct amdgpu_mqd_prop *prop)
{
	struct amdgpu_device *adev = ring->adev;
	bool is_high_prio_compute = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
				    amdgpu_gfx_is_high_priority_compute_queue(adev, ring);
	bool is_high_prio_gfx = ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
				amdgpu_gfx_is_high_priority_graphics_queue(adev, ring);

	memset(prop, 0, sizeof(*prop));

	prop->mqd_gpu_addr = ring->mqd_gpu_addr;
	prop->hqd_base_gpu_addr = ring->gpu_addr;
	prop->rptr_gpu_addr = ring->rptr_gpu_addr;
	prop->wptr_gpu_addr = ring->wptr_gpu_addr;
	prop->queue_size = ring->ring_size;
	prop->eop_gpu_addr = ring->eop_gpu_addr;
	prop->use_doorbell = ring->use_doorbell;
	prop->doorbell_index = ring->doorbell_index;

	/* The map_queues packet doesn't need to activate the queue,
	 * so only KIQ needs to set this field.
	 */
	prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;

	prop->allow_tunneling = is_high_prio_compute;
	if (is_high_prio_compute || is_high_prio_gfx) {
		prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
		prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
	}
}

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_mqd *mqd_mgr;
	struct amdgpu_mqd_prop prop;

	amdgpu_ring_to_mqd_prop(ring, &prop);

	ring->wptr = 0;

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
	else
		mqd_mgr = &adev->mqds[ring->funcs->type];

	return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
}

void amdgpu_ring_ib_begin(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_begin(ring);
}

void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_end(ring);
}

void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL);
}

void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE);
}

void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
}

bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
{
	if (!ring)
		return false;

	if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
		return false;

	return true;
}