/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
        struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

        return __f;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = atomic_read(&drv->last_seq);

        return seq;
}
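
/*
 * Illustrative note (not driver code): fence sequence numbers are 32-bit and
 * are compared with wrap-around-safe signed arithmetic rather than a plain
 * "a < b".  A minimal sketch of that comparison, mirroring the check used in
 * amdgpu_fence_wait_polling() below:
 *
 *      static inline bool seq_after(u32 a, u32 b)
 *      {
 *              // true when 'a' is newer than 'b', even across a 2^32 wrap
 *              return (int32_t)(a - b) > 0;
 *      }
 *
 * For example, with a = 0x00000002 and b = 0xfffffffe (i.e. just after a
 * wrap), a - b == 4, so 'a' is correctly treated as the newer sequence.
 */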

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @af: amdgpu fence input
 * @flags: flags to pass into the subordinate .emit_fence() call
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
                      unsigned int flags)
{
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *fence;
        struct dma_fence __rcu **ptr;
        uint32_t seq;
        int r;

        fence = &af->base;
        af->ring = ring;

        seq = ++ring->fence_drv.sync_seq;
        dma_fence_init(fence, &amdgpu_fence_ops,
                       &ring->fence_drv.lock,
                       adev->fence_context + ring->idx, seq);

        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);
        amdgpu_fence_save_wptr(af);
        pm_runtime_get_noresume(adev_to_drm(adev)->dev);
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        if (unlikely(rcu_dereference_protected(*ptr, 1))) {
                struct dma_fence *old;

                rcu_read_lock();
                old = dma_fence_get_rcu_safe(ptr);
                rcu_read_unlock();

                if (old) {
                        r = dma_fence_wait(old, false);
                        dma_fence_put(old);
                        if (r)
                                return r;
                }
        }

        to_amdgpu_fence(fence)->start_timestamp = ktime_get();

        /* This function can't be called concurrently anyway, otherwise
         * emitting the fence would mess up the hardware ring buffer.
         */
        rcu_assign_pointer(*ptr, dma_fence_get(fence));

        return 0;
}

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: the timeout for waiting in usecs
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for fence polling.
 * Returns 0 on success, -EINVAL or -ETIMEDOUT on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
                              uint32_t timeout)
{
        uint32_t seq;
        signed long r;

        if (!s)
                return -EINVAL;

        seq = ++ring->fence_drv.sync_seq;
        r = amdgpu_fence_wait_polling(ring,
                                      seq - ring->fence_drv.num_fences_mask,
                                      timeout);
        if (r < 1)
                return -ETIMEDOUT;

        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, 0);

        *s = seq;

        return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
        mod_timer(&ring->fence_drv.fallback_timer,
                  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
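
/*
 * Illustrative sketch (not part of the driver): the polling variants are
 * meant for cases where fence interrupts are not usable.  Assuming the caller
 * has already reserved space on the ring, a minimal usage could look like:
 *
 *      uint32_t seq;
 *      int r;
 *
 *      r = amdgpu_fence_emit_polling(ring, &seq, usec_timeout);
 *      if (r)
 *              return r;
 *      amdgpu_ring_commit(ring);
 *      if (!amdgpu_fence_wait_polling(ring, seq, usec_timeout))
 *              return -ETIMEDOUT;
 *
 * usec_timeout is just a stand-in for whatever timeout (in usecs) the caller
 * considers appropriate.
 */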

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 *
 * Returns true if fence was processed
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        struct amdgpu_device *adev = ring->adev;
        uint32_t seq, last_seq;

        do {
                last_seq = atomic_read(&ring->fence_drv.last_seq);
                seq = amdgpu_fence_read(ring);

        } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

        if (timer_delete(&ring->fence_drv.fallback_timer) &&
            seq != ring->fence_drv.sync_seq)
                amdgpu_fence_schedule_fallback(ring);

        if (unlikely(seq == last_seq))
                return false;

        last_seq &= drv->num_fences_mask;
        seq &= drv->num_fences_mask;

        do {
                struct dma_fence *fence, **ptr;
                struct amdgpu_fence *am_fence;

                ++last_seq;
                last_seq &= drv->num_fences_mask;
                ptr = &drv->fences[last_seq];

                /* There is always exactly one thread signaling this fence slot */
                fence = rcu_dereference_protected(*ptr, 1);
                RCU_INIT_POINTER(*ptr, NULL);

                if (!fence)
                        continue;

                /* Save the wptr in the fence driver so we know what the last processed
                 * wptr was. This is required for re-emitting the ring state for
                 * queues that are reset but are not guilty and thus have no guilty fence.
                 */
                am_fence = container_of(fence, struct amdgpu_fence, base);
                drv->signalled_wptr = am_fence->wptr;
                dma_fence_signal(fence);
                dma_fence_put(fence);
                pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
        } while (last_seq != seq);

        return true;
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to ring structure
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
        struct amdgpu_ring *ring = timer_container_of(ring, t,
                                                      fence_drv.fallback_timer);

        if (amdgpu_fence_process(ring))
                dev_warn(ring->adev->dev,
                         "Fence fallback timer expired on ring %s\n",
                         ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
        struct dma_fence *fence, **ptr;
        int r;

        if (!seq)
                return 0;

        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        rcu_read_lock();
        fence = rcu_dereference(*ptr);
        if (!fence || !dma_fence_get_rcu(fence)) {
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();

        r = dma_fence_wait(fence, false);
        dma_fence_put(fence);
        return r;
}
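
/*
 * Illustrative sketch (not part of this file): amdgpu_fence_process() is
 * normally driven from the per-IP end-of-pipe interrupt handlers.  A
 * hypothetical handler would just look up the ring and forward to it,
 * roughly like:
 *
 *      static int example_eop_irq(struct amdgpu_device *adev,
 *                                 struct amdgpu_irq_src *source,
 *                                 struct amdgpu_iv_entry *entry)
 *      {
 *              struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
 *
 *              amdgpu_fence_process(ring);
 *              return 0;
 *      }
 *
 * The fallback timer above covers the case where such an interrupt is lost.
 */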

/**
 * amdgpu_fence_wait_polling - busy wait for given sequence number
 *
 * @ring: ring index the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns the remaining timeout if the sequence number was reached,
 * 0 on timeout.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
                                      uint32_t wait_seq,
                                      signed long timeout)
{

        while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) {
                udelay(2);
                timeout -= 2;
        }
        return timeout > 0 ? timeout : 0;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        emitted = 0x100000000ull;
        emitted -= atomic_read(&ring->fence_drv.last_seq);
        emitted += READ_ONCE(ring->fence_drv.sync_seq);
        return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_last_unsignaled_time_us - time since the earliest unsignaled fence
 * @ring: ring the fence is associated with
 *
 * Find the earliest fence that is still unsignaled and return the time delta
 * between when that fence was emitted and now, in microseconds.
 */
u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        struct dma_fence *fence;
        uint32_t last_seq, sync_seq;

        last_seq = atomic_read(&ring->fence_drv.last_seq);
        sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
        if (last_seq == sync_seq)
                return 0;

        ++last_seq;
        last_seq &= drv->num_fences_mask;
        fence = drv->fences[last_seq];
        if (!fence)
                return 0;

        return ktime_us_delta(ktime_get(),
                              to_amdgpu_fence(fence)->start_timestamp);
}

/**
 * amdgpu_fence_update_start_timestamp - update the timestamp of the fence
 * @ring: ring the fence is associated with
 * @seq: the fence seq number to update.
 * @timestamp: the start timestamp to update.
 *
 * This function is called when the fence and its related ib are about to be
 * resubmitted to the GPU in an MCBP scenario.  Therefore, we do not need to
 * consider a race with amdgpu_fence_process() modifying the same fence.
 */
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        struct dma_fence *fence;

        seq &= drv->num_fences_mask;
        fence = drv->fences[seq];
        if (!fence)
                return;

        to_amdgpu_fence(fence)->start_timestamp = timestamp;
}
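
/*
 * Illustrative sketch (not part of this file): power-management code can use
 * amdgpu_fence_count_emitted() to estimate whether the GPU is busy.  A rough
 * idle check over all rings might look like:
 *
 *      static bool example_rings_idle(struct amdgpu_device *adev)
 *      {
 *              unsigned int i;
 *
 *              for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 *                      struct amdgpu_ring *ring = adev->rings[i];
 *
 *                      if (ring && ring->fence_drv.initialized &&
 *                          amdgpu_fence_count_emitted(ring))
 *                              return false;
 *              }
 *              return true;
 *      }
 *
 * The count can be slightly stale, as noted above, so callers should treat
 * it as a hint rather than an exact value.
 */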

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned int irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
                ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
                ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
        } else {
                /* put fence directly behind firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;

        DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
                      ring->name, ring->fence_drv.gpu_addr);
        return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (!adev)
                return -EINVAL;

        if (!is_power_of_2(ring->num_hw_submission))
                return -EINVAL;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        ring->fence_drv.sync_seq = 0;
        atomic_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

        ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
        spin_lock_init(&ring->fence_drv.lock);
        ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
                                         GFP_KERNEL);

        if (!ring->fence_drv.fences)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_fence_driver_sw_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
        return 0;
}
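
/*
 * Illustrative note (not driver code): the fence slot array sized in
 * amdgpu_fence_driver_init_ring() holds num_hw_submission * 2 entries, and
 * because num_hw_submission must be a power of two, slots can be selected
 * with a simple mask instead of a modulo:
 *
 *      // e.g. num_hw_submission = 128  =>  256 slots, mask = 0xff
 *      ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 *
 * With 256 slots and seq = 0x00000301, the slot index is 0x01, so a new
 * fence only reuses a slot after 256 older fences have been emitted.
 */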

/**
 * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
 * fence driver interrupts need to be restored.
 *
 * @ring: ring to be checked
 *
 * Interrupts for rings that belong to GFX IP don't need to be restored
 * when the target power state is s0ix.
 *
 * Return true if interrupts need to be restored, false otherwise.
 */
static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        bool is_gfx_power_domain = false;

        switch (ring->funcs->type) {
        case AMDGPU_RING_TYPE_SDMA:
                /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
                if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
                    IP_VERSION(5, 0, 0))
                        is_gfx_power_domain = true;
                break;
        case AMDGPU_RING_TYPE_GFX:
        case AMDGPU_RING_TYPE_COMPUTE:
        case AMDGPU_RING_TYPE_KIQ:
        case AMDGPU_RING_TYPE_MES:
                is_gfx_power_domain = true;
                break;
        default:
                break;
        }

        return !(adev->in_s0ix && is_gfx_power_domain);
}

/**
 * amdgpu_fence_driver_hw_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* You can't wait for HW to signal if it's gone */
                if (!drm_dev_is_unplugged(adev_to_drm(adev)))
                        r = amdgpu_fence_wait_empty(ring);
                else
                        r = -ENODEV;
                /* no need to trigger GPU reset as we are unloading */
                if (r)
                        amdgpu_fence_driver_force_completion(ring);

                if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
                    ring->fence_drv.irq_src &&
                    amdgpu_fence_need_ring_interrupt_restore(ring))
                        amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);

                timer_delete_sync(&ring->fence_drv.fallback_timer);
        }
}

/* Will either stop and flush handlers for amdgpu interrupt or re-enable it */
void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
                        continue;

                if (stop)
                        disable_irq(adev->irq.irq);
                else
                        enable_irq(adev->irq.irq);
        }
}

void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
{
        unsigned int i, j;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /*
                 * Notice we check for sched.ops since there's some
                 * override on the meaning of sched.ready by amdgpu.
                 * The natural check would be sched.ready, which is
                 * set as drm_sched_init() finishes...
                 */
                if (ring->sched.ops)
                        drm_sched_fini(&ring->sched);

                for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
                        dma_fence_put(ring->fence_drv.fences[j]);
                kfree(ring->fence_drv.fences);
                ring->fence_drv.fences = NULL;
                ring->fence_drv.initialized = false;
        }
}
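
/*
 * Illustrative sketch (not part of this file): hw_fini/hw_init are intended
 * to bracket a period where the hardware cannot signal fences, e.g. around
 * suspend/resume.  Assuming a hypothetical suspend path, a simplified
 * ordering would be:
 *
 *      amdgpu_fence_driver_hw_fini(adev);   // drain fences, drop fence IRQs
 *      // ... power down and later re-initialize the ASIC ...
 *      amdgpu_fence_driver_hw_init(adev);   // re-enable fence interrupts
 *
 * The sw_init/sw_fini pair instead manages the software state (fence slot
 * arrays, timers) and wraps the whole lifetime of the device.
 */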

/**
 * amdgpu_fence_driver_hw_init - enable the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Enable the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* enable the interrupt */
                if (ring->fence_drv.irq_src &&
                    amdgpu_fence_need_ring_interrupt_restore(ring))
                        amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_set_error - set error code on fences
 * @ring: the ring which contains the fences
 * @error: the error code to set
 *
 * Set an error code to all the fences pending on the ring.
 */
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        unsigned long flags;

        spin_lock_irqsave(&drv->lock, flags);
        for (unsigned int i = 0; i <= drv->num_fences_mask; ++i) {
                struct dma_fence *fence;

                fence = rcu_dereference_protected(drv->fences[i],
                                                  lockdep_is_held(&drv->lock));
                if (fence && !dma_fence_is_signaled_locked(fence))
                        dma_fence_set_error(fence, error);
        }
        spin_unlock_irqrestore(&drv->lock, flags);
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring to signal the fences on
 *
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
        amdgpu_fence_driver_set_error(ring, -ECANCELED);
        amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
        amdgpu_fence_process(ring);
}

/*
 * Kernel queue reset handling
 *
 * The driver can reset individual queues for most engines, but those queues
 * may contain work from multiple contexts.  Resetting the queue will lose
 * all of that state.  In order to minimize the collateral damage, the
 * driver will save the ring contents which are not associated with the guilty
 * context prior to resetting the queue.  After resetting the queue, the queue
 * contents from the other contexts are re-emitted to the rings so that they
 * can be processed by the engine.  To handle this, we save the queue's write
 * pointer (wptr) in the fences associated with each context.  If we get a
 * queue timeout, we can then use the wptrs from the fences to determine
 * which data needs to be saved out of the queue's ring buffer.
 */
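
/*
 * Illustrative sketch (not part of this file) of how the helpers below fit
 * into a queue reset, under the assumption that an engine-specific reset
 * callback drives them roughly in this order:
 *
 *      // 1. Save ring contents not tied to the guilty context.
 *      amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
 *
 *      // 2. Reset the queue in hardware (engine-specific, not shown here).
 *
 *      // 3. Complete the guilty fence with -ETIME and cancel the rest of
 *      //    its context so waiters are not stuck forever.
 *      amdgpu_fence_driver_guilty_force_completion(guilty_fence);
 *
 *      // 4. Re-emit the backed-up commands from ring->ring_backup so the
 *      //    innocent contexts' work still reaches the engine.
 *
 * The engine-specific reset and re-emit steps live outside this file.
 */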

/**
 * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
 *
 * @af: fence of the ring to signal
 *
 */
void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
{
        struct dma_fence *unprocessed;
        struct dma_fence __rcu **ptr;
        struct amdgpu_fence *fence;
        struct amdgpu_ring *ring = af->ring;
        unsigned long flags;
        u32 seq, last_seq;

        last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
        seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;

        /* mark all fences from the guilty context with an error */
        spin_lock_irqsave(&ring->fence_drv.lock, flags);
        do {
                last_seq++;
                last_seq &= ring->fence_drv.num_fences_mask;

                ptr = &ring->fence_drv.fences[last_seq];
                rcu_read_lock();
                unprocessed = rcu_dereference(*ptr);

                if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
                        fence = container_of(unprocessed, struct amdgpu_fence, base);

                        if (fence == af)
                                dma_fence_set_error(&fence->base, -ETIME);
                        else if (fence->context == af->context)
                                dma_fence_set_error(&fence->base, -ECANCELED);
                }
                rcu_read_unlock();
        } while (last_seq != seq);
        spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
        /* signal the guilty fence */
        amdgpu_fence_write(ring, (u32)af->base.seqno);
        amdgpu_fence_process(ring);
}

void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
{
        af->wptr = af->ring->wptr;
}

static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
                                                   u64 start_wptr, u32 end_wptr)
{
        unsigned int first_idx = start_wptr & ring->buf_mask;
        unsigned int last_idx = end_wptr & ring->buf_mask;
        unsigned int i;

        /* Backup the contents of the ring buffer. */
        for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
                ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
}
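
/*
 * Illustrative note (not driver code): the helper above copies dwords between
 * two write pointers and relies on buf_mask to handle ring buffer wrap-around.
 * For a hypothetical 256-dword ring (buf_mask = 0xff), backing up from
 * start_wptr = 0xfe to end_wptr = 0x102 copies indices 0xfe, 0xff, 0x00 and
 * 0x01, i.e. four entries, into ring->ring_backup.
 */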

void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
                                             struct amdgpu_fence *guilty_fence)
{
        struct dma_fence *unprocessed;
        struct dma_fence __rcu **ptr;
        struct amdgpu_fence *fence;
        u64 wptr;
        u32 seq, last_seq;

        last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
        seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
        wptr = ring->fence_drv.signalled_wptr;
        ring->ring_backup_entries_to_copy = 0;

        do {
                last_seq++;
                last_seq &= ring->fence_drv.num_fences_mask;

                ptr = &ring->fence_drv.fences[last_seq];
                rcu_read_lock();
                unprocessed = rcu_dereference(*ptr);

                if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
                        fence = container_of(unprocessed, struct amdgpu_fence, base);

                        /* save everything if the ring is not guilty, otherwise
                         * just save the content from other contexts.
                         */
                        if (!guilty_fence || (fence->context != guilty_fence->context))
                                amdgpu_ring_backup_unprocessed_command(ring, wptr,
                                                                       fence->wptr);
                        wptr = fence->wptr;
                }
                rcu_read_unlock();
        } while (last_seq != seq);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
        return (const char *)to_amdgpu_fence(f)->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
        if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);

        return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

        /* free the amdgpu_fence that embeds this dma_fence */
        kfree(to_amdgpu_fence(f));
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
        call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
        struct amdgpu_device *adev = m->private;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence 0x%08x\n",
                           atomic_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted 0x%08x\n",
                           ring->fence_drv.sync_seq);

                if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
                    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
                        seq_printf(m, "Last signaled trailing fence 0x%08x\n",
                                   le32_to_cpu(*ring->trail_fence_cpu_addr));
                        seq_printf(m, "Last emitted 0x%08x\n",
                                   ring->trail_seq);
                }

                if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
                        continue;

                /* set in CP_VMID_PREEMPT and preemption occurred */
                seq_printf(m, "Last preempted 0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
                /* set in CP_VMID_RESET and reset occurred */
                seq_printf(m, "Last reset 0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
                /* Both preemption and reset occurred */
                seq_printf(m, "Last both 0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
        }
        return 0;
}
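
/*
 * Illustrative example (values are made up): reading this file through
 * debugfs, e.g. /sys/kernel/debug/dri/<minor>/amdgpu_fence_info, produces
 * per-ring output of the form:
 *
 *      --- ring 0 (gfx_0.0.0) ---
 *      Last signaled fence 0x00001234
 *      Last emitted 0x00001236
 *
 * A gap between "signaled" and "emitted" means fences are still outstanding
 * on that ring.
 */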

/*
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int gpu_recover_get(void *data, u64 *val)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)data;
        struct drm_device *dev = adev_to_drm(adev);
        int r;

        r = pm_runtime_get_sync(dev->dev);
        if (r < 0) {
                pm_runtime_put_autosuspend(dev->dev);
                return 0;
        }

        if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
                flush_work(&adev->reset_work);

        *val = atomic_read(&adev->reset_domain->reset_res);

        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
                         "%lld\n");

static void amdgpu_debugfs_reset_work(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  reset_work);

        struct amdgpu_reset_context reset_context;

        memset(&reset_context, 0, sizeof(reset_context));

        reset_context.method = AMD_RESET_METHOD_NONE;
        reset_context.reset_req_dev = adev;
        reset_context.src = AMDGPU_RESET_SRC_USER;
        set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
        set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

        amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

#endif

void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;

        debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
                            &amdgpu_debugfs_fence_info_fops);

        if (!amdgpu_sriov_vf(adev)) {

                INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
                debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
                                    &amdgpu_debugfs_gpu_recover_fops);
        }
#endif
}