/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops ||
	    __f->base.ops == &amdgpu_job_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @af: amdgpu fence input
 * @flags: flags to pass into the subordinate .emit_fence() call
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
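 *
 * A minimal usage sketch (illustrative only, not taken from an actual
 * caller; ring setup, undo and error handling are assumed):
 *
 *   struct dma_fence *fence;
 *   int r;
 *
 *   r = amdgpu_ring_alloc(ring, 64);
 *   if (r)
 *           return r;
 *   r = amdgpu_fence_emit(ring, &fence, NULL, 0);
 *   if (r)
 *           return r;
 *   amdgpu_ring_commit(ring);
 *   ... wait on or export the fence ...
 *   dma_fence_put(fence);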
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      struct amdgpu_fence *af, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence;
	struct amdgpu_fence *am_fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	if (!af) {
		/* create a separate hw fence */
		am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
		if (!am_fence)
			return -ENOMEM;
	} else {
		am_fence = af;
	}
	fence = &am_fence->base;
	am_fence->ring = ring;

	seq = ++ring->fence_drv.sync_seq;
	if (af) {
		dma_fence_init(fence, &amdgpu_job_fence_ops,
			       &ring->fence_drv.lock,
			       adev->fence_context + ring->idx, seq);
		/* Against remove in amdgpu_job_{free, free_cb} */
		dma_fence_get(fence);
	} else {
		dma_fence_init(fence, &amdgpu_fence_ops,
			       &ring->fence_drv.lock,
			       adev->fence_context + ring->idx, seq);
	}

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	to_amdgpu_fence(fence)->start_timestamp = ktime_get();

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(fence));

	*f = fence;

	return 0;
}

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: the timeout for waiting in usecs
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling fences.
 * Returns 0 on success, -EINVAL or -ETIMEDOUT on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout)
{
	uint32_t seq;
	signed long r;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		return -ETIMEDOUT;

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
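 *
 * A hedged sketch of a typical caller (the real callers are the per-IP
 * fence interrupt handlers and the fallback timer, not shown here):
 *
 *   if (amdgpu_fence_process(ring))
 *           dev_dbg(ring->adev->dev, "fences signaled on %s\n", ring->name);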
 *
 * Returns true if fence was processed
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	uint32_t seq, last_seq;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (timer_delete(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		dma_fence_signal(fence);
		dma_fence_put(fence);
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	} while (last_seq != seq);

	return true;
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to ring structure
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = timer_container_of(ring, t,
						      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		dev_warn(ring->adev->dev,
			 "Fence fallback timer expired on ring %s\n",
			 ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy wait until the given sequence number has signaled on the requested
 * ring (all asics).
 * Returns the remaining timeout if the sequence number signaled in time,
 * or 0 on timeout.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{

	while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) {
		udelay(2);
		timeout -= 2;
	}
	return timeout > 0 ? timeout : 0;
}
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
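 *
 * An illustrative check (the caller shown here is assumed, not taken from
 * this file): treat the ring as busy while fences are still outstanding:
 *
 *   if (amdgpu_fence_count_emitted(ring))
 *           ring_is_busy = true;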
 */
unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_last_unsignaled_time_us - time since the earliest unsignaled fence was emitted
 * @ring: ring the fence is associated with
 *
 * Find the earliest fence that is still unsignaled and calculate the time
 * delta between when that fence was emitted and now.
 */
u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct dma_fence *fence;
	uint32_t last_seq, sync_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
	if (last_seq == sync_seq)
		return 0;

	++last_seq;
	last_seq &= drv->num_fences_mask;
	fence = drv->fences[last_seq];
	if (!fence)
		return 0;

	return ktime_us_delta(ktime_get(),
			      to_amdgpu_fence(fence)->start_timestamp);
}

/**
 * amdgpu_fence_update_start_timestamp - update the timestamp of the fence
 * @ring: ring the fence is associated with
 * @seq: the fence seq number to update.
 * @timestamp: the start timestamp to update.
 *
 * This function is called when the fence and its related ib are about to
 * be resubmitted to the gpu in an MCBP scenario. Thus we do not consider
 * race conditions with amdgpu_fence_process modifying the same fence.
 */
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct dma_fence *fence;

	seq &= drv->num_fences_mask;
	fence = drv->fences[seq];
	if (!fence)
		return;

	to_amdgpu_fence(fence)->start_timestamp = timestamp;
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
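 *
 * A hedged sketch of the expected call order during ring bring-up (the
 * irq_src/irq_type values are illustrative placeholders):
 *
 *   r = amdgpu_fence_driver_init_ring(ring);
 *   if (r)
 *           return r;
 *   r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);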
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned int irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
		ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
		      ring->name, ring->fence_drv.gpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (!adev)
		return -EINVAL;

	if (!is_power_of_2(ring->num_hw_submission))
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);

	if (!ring->fence_drv.fences)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_fence_driver_sw_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
	return 0;
}

/**
 * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
 * fence driver interrupts need to be restored.
 *
 * @ring: ring to be checked
 *
 * Interrupts for rings that belong to GFX IP don't need to be restored
 * when the target power state is s0ix.
 *
 * Return true if the interrupts need to be restored, false otherwise.
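 *
 * Illustrative use, mirroring the hw_init/hw_fini paths further below:
 *
 *   if (ring->fence_drv.irq_src &&
 *       amdgpu_fence_need_ring_interrupt_restore(ring))
 *           amdgpu_irq_get(adev, ring->fence_drv.irq_src,
 *                          ring->fence_drv.irq_type);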
 */
static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool is_gfx_power_domain = false;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_SDMA:
		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
		    IP_VERSION(5, 0, 0))
			is_gfx_power_domain = true;
		break;
	case AMDGPU_RING_TYPE_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
	case AMDGPU_RING_TYPE_MES:
		is_gfx_power_domain = true;
		break;
	default:
		break;
	}

	return !(adev->in_s0ix && is_gfx_power_domain);
}

/**
 * amdgpu_fence_driver_hw_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* You can't wait for HW to signal if it's gone */
		if (!drm_dev_is_unplugged(adev_to_drm(adev)))
			r = amdgpu_fence_wait_empty(ring);
		else
			r = -ENODEV;
		/* no need to trigger GPU reset as we are unloading */
		if (r)
			amdgpu_fence_driver_force_completion(ring);

		if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
		    ring->fence_drv.irq_src &&
		    amdgpu_fence_need_ring_interrupt_restore(ring))
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);

		timer_delete_sync(&ring->fence_drv.fallback_timer);
	}
}

/* Will either stop and flush handlers for the amdgpu interrupt or re-enable it */
void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
			continue;

		if (stop)
			disable_irq(adev->irq.irq);
		else
			enable_irq(adev->irq.irq);
	}
}

void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
{
	unsigned int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/*
		 * Notice we check for sched.ops since there's some
		 * override on the meaning of sched.ready by amdgpu.
		 * The natural check would be sched.ready, which is
		 * set as drm_sched_init() finishes...
		 */
		if (ring->sched.ops)
			drm_sched_fini(&ring->sched);

		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_hw_init - enable the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Enable the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
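 *
 * A hedged sketch of where this sits in the suspend/resume flow (the
 * callers shown are assumed, not part of this file):
 *
 *   amdgpu_fence_driver_hw_fini(adev);    on suspend
 *   ...
 *   amdgpu_fence_driver_hw_init(adev);    on resume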
 */
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		if (ring->fence_drv.irq_src &&
		    amdgpu_fence_need_ring_interrupt_restore(ring))
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
 *
 * @ring: ring whose job embedded fences should be cleared
 *
 */
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
{
	int i;
	struct dma_fence *old, **ptr;

	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
		ptr = &ring->fence_drv.fences[i];
		old = rcu_dereference_protected(*ptr, 1);
		if (old && old->ops == &amdgpu_job_fence_ops) {
			struct amdgpu_job *job;

			/* For non-scheduler bad job, i.e. failed ib test, we need to signal
			 * it right here or we won't be able to track them in fence_drv
			 * and they will remain unsignaled during sa_bo free.
			 */
			job = container_of(old, struct amdgpu_job, hw_fence.base);
			if (!job->base.s_fence && !dma_fence_is_signaled(old))
				dma_fence_signal(old);
			RCU_INIT_POINTER(*ptr, NULL);
			dma_fence_put(old);
		}
	}
}

/**
 * amdgpu_fence_driver_set_error - set error code on fences
 * @ring: the ring which contains the fences
 * @error: the error code to set
 *
 * Set an error code on all the fences pending on the ring.
 */
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	unsigned long flags;

	spin_lock_irqsave(&drv->lock, flags);
	for (unsigned int i = 0; i <= drv->num_fences_mask; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(drv->fences[i],
						  lockdep_is_held(&drv->lock));
		if (fence && !dma_fence_is_signaled_locked(fence))
			dma_fence_set_error(fence, error);
	}
	spin_unlock_irqrestore(&drv->lock, flags);
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring whose latest fence should be force signaled
 *
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_driver_set_error(ring, -ECANCELED);
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	return (const char *)to_amdgpu_fence(f)->ring->name;
}

static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);

	return (const char *)to_amdgpu_ring(job->base.sched)->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called by the dma_fence core with the fence lock held
 * when the first waiter is attached. It arms the fallback timer, if it is
 * not already pending, so the fence still signals if an interrupt is missed.
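 *
 * Drivers do not call this hook directly; the dma_fence core invokes it on
 * behalf of a waiter, e.g. (illustrative, not from this file):
 *
 *   dma_fence_wait(f, false);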
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);

	return true;
}

/**
 * amdgpu_job_fence_enable_signaling - enable signalling on job fence
 * @f: fence
 *
 * This is similar to amdgpu_fence_enable_signaling above, it
 * only handles the job embedded fence.
 */
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);

	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	/* free the separately allocated fence */
	kfree(to_amdgpu_fence(f));
}

/**
 * amdgpu_job_fence_free - free up the job with embedded fence
 *
 * @rcu: RCU callback head
 *
 * Free up the job with embedded fence after the RCU grace period.
 */
static void amdgpu_job_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	/* free the job which embeds this fence */
	kfree(container_of(f, struct amdgpu_job, hw_fence.base));
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

/**
 * amdgpu_job_fence_release - callback that job embedded fence can be freed
 *
 * @f: fence
 *
 * This is similar to amdgpu_fence_release above, it
 * only handles the job embedded fence.
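 *
 * Illustrative lifetime note (the caller here is assumed): the final
 * dma_fence_put() on a job's hw fence ends up in this callback, and the
 * embedding amdgpu_job is then freed after an RCU grace period:
 *
 *   dma_fence_put(&job->hw_fence.base);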
 */
static void amdgpu_job_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_job_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};

static const struct dma_fence_ops amdgpu_job_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
	.enable_signaling = amdgpu_job_fence_enable_signaling,
	.release = amdgpu_job_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted 0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/*
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset at the next fence wait.
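 *
 * From userspace this is exercised by reading the debugfs file registered
 * in amdgpu_debugfs_fence_init() below; the exact path depends on the DRM
 * minor number, e.g. (illustrative):
 *
 *   cat /sys/kernel/debug/dri/0/amdgpu_gpu_recover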
 */
static int gpu_recover_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	struct drm_device *dev = adev_to_drm(adev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
		flush_work(&adev->reset_work);

	*val = atomic_read(&adev->reset_domain->reset_res);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
			 "%lld\n");

static void amdgpu_debugfs_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);

	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	reset_context.src = AMDGPU_RESET_SRC_USER;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

#endif

void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
			    &amdgpu_debugfs_fence_info_fops);

	if (!amdgpu_sriov_vf(adev)) {

		INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
		debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
				    &amdgpu_debugfs_gpu_recover_fops);
	}
#endif
}