/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring *ring;
};

static struct kmem_cache *amdgpu_fence_slab;
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);

/*
 * Cast helper
 */
static const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}
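/*
 * Illustrative usage sketch (not part of the driver): how a caller that
 * already owns the ring combines amdgpu_fence_emit() below with the
 * generic struct fence API.  The surrounding locking and command
 * submission are elided; this only shows the reference handling:
 *
 *	struct fence *f;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &f);
 *	if (r)
 *		return r;		// -ENOMEM, nothing was emitted
 *	// ...finish and commit the ring contents...
 *	r = fence_wait(f, false);	// false = non-interruptible wait
 *	fence_put(f);			// drop the reference emit returned
 */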
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	fence_init(&fence->base, &amdgpu_fence_ops,
		   &ring->fence_drv.lock,
		   adev->fence_context + ring->idx,
		   seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value.  Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	while (last_seq != seq) {
		struct fence *fence, **ptr;

		ptr = &drv->fences[++last_seq & drv->num_fences_mask];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		rcu_assign_pointer(*ptr, NULL);

		BUG_ON(!fence);

		r = fence_signal(fence);
		if (!r)
			FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		fence_put(fence);
	}
}
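/*
 * Worked example for the sequence window handled above (illustrative
 * numbers only): with last_seq == 0xfffffffe and a hardware read of
 * seq == 0x00000001, the signalling loop runs for ++last_seq values
 * 0xffffffff, 0x00000000 and 0x00000001, i.e. three fences are
 * signalled across the 32-bit wrap.  This only works because far fewer
 * than 2^32 fences can be outstanding at once, so the unsigned
 * wrap-around of last_seq is always well defined.
 */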
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring structure, cast to unsigned long
 *
 * Checks for fence activity in case the fence interrupt was missed.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
	struct fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = fence_wait(fence, false);
	fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
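/*
 * Worked example for the arithmetic above (illustrative values): with
 * last_seq == 0xfffffffd already signalled and sync_seq == 0x00000002
 * emitted, the 64-bit intermediate is
 *	emitted = 0x100000000 - 0xfffffffd + 0x2 = 0x5,
 * so five fences are reported outstanding even though sync_seq has
 * already wrapped past last_seq.  Doing the math in 64 bits with a
 * 2^32 bias and truncating at the end is what makes the wrap safe.
 */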
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

	ring->fence_drv.num_fences_mask = num_hw_submission - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
	if (timeout == 0) {
		/*
		 * FIXME:
		 * The delayed workqueue cannot take MAX_SCHEDULE_TIMEOUT
		 * directly, so the scheduler does not use the delayed
		 * workqueue when MAX_SCHEDULE_TIMEOUT is set.
		 * Keep it simple and silly for now.
		 */
		timeout = MAX_SCHEDULE_TIMEOUT;
	}
	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
			   num_hw_submission,
			   timeout, ring->name);
	if (r) {
		DRM_ERROR("Failed to create scheduler on ring %s.\n",
			  ring->name);
		return r;
	}

	return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
		amdgpu_fence_slab = kmem_cache_create(
			"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!amdgpu_fence_slab)
			return -ENOMEM;
	}
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}
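/*
 * Bring-up order, as implied by the helpers above (a sketch, not a new
 * API contract): amdgpu_fence_driver_init() runs once per device,
 * amdgpu_fence_driver_init_ring() once per ring the asic actually has,
 * and amdgpu_fence_driver_start_ring() once per ring when its interrupt
 * source is known:
 *
 *	r = amdgpu_fence_driver_init(adev);
 *	// for each ring present on this asic:
 *	r = amdgpu_fence_driver_init_ring(ring, num_hw_submission);
 *	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
 */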
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.initialized = false;
	}

	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
		kmem_cache_destroy(amdgpu_fence_slab);
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * re-enable fence interrupts on the rings it has.
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}
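/*
 * Note on the mechanism above (explanatory sketch): writing sync_seq,
 * the last emitted sequence number, into the fence memory makes every
 * emitted fence read back as completed.  The next amdgpu_fence_process()
 * run, whether from the interrupt handler or the fallback timer, then
 * signals all outstanding fences and releases their waiters without the
 * GPU having to finish anything.
 */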
/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held and arms the
 * fallback timer, so the fence still gets signalled even if the
 * fence interrupt is missed.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct fence *f = container_of(rcu, struct fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = fence_default_wait,
	.release = amdgpu_fence_release,
};
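/*
 * Sketch of how the ops table above is exercised (assumes the caller
 * already holds a reference on an amdgpu fence f).  fence_wait() ends
 * up in fence_default_wait(), which enables signalling once; from then
 * on either the fence interrupt or the fallback timer runs
 * amdgpu_fence_process() and signals the fence:
 *
 *	if (!fence_is_signaled(f))
 *		r = fence_wait(f, true);	// may return -ERESTARTSYS
 */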
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
					ARRAY_SIZE(amdgpu_debugfs_fence_list));
#else
	return 0;
#endif
}