Lines Matching full:ring
63 * @ring: ring the fence is associated with
68 static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq) in amdgpu_fence_write() argument
70 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_write()
79 * @ring: ring the fence is associated with
84 static u32 amdgpu_fence_read(struct amdgpu_ring *ring) in amdgpu_fence_read() argument
86 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_read()
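Only the drv lookups from the two accessors match here. A minimal sketch of the full read/write pair, assuming the fence driver keeps a CPU-visible, little-endian seqno slot at drv->cpu_addr and falls back to the cached last_seq when no slot is mapped:

static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		/* no mapped slot yet: report the last processed seqno */
		seq = atomic_read(&drv->last_seq);

	return seq;
}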
98 * amdgpu_fence_emit - emit a fence on the requested ring
100 * @ring: ring the fence is associated with
105 * Emits a fence command on the requested ring (all asics).
108 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, in amdgpu_fence_emit() argument
111 struct amdgpu_device *adev = ring->adev; in amdgpu_fence_emit()
128 am_fence->ring = ring; in amdgpu_fence_emit()
130 seq = ++ring->fence_drv.sync_seq; in amdgpu_fence_emit()
134 &ring->fence_drv.lock, in amdgpu_fence_emit()
135 adev->fence_context + ring->idx, seq); in amdgpu_fence_emit()
140 &ring->fence_drv.lock, in amdgpu_fence_emit()
141 adev->fence_context + ring->idx, seq); in amdgpu_fence_emit()
144 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, in amdgpu_fence_emit()
148 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_emit()
167 * emitting the fence would mess up the hardware ring buffer. in amdgpu_fence_emit()
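The matches above trace the emit path end to end. A hedged sketch of its core, with error handling, the job-embedded-fence branch, and the slot-recycling wait elided (amdgpu_fence_ops and the RCU publish are inferred from the surrounding matches):

	seq = ++ring->fence_drv.sync_seq;	/* next seqno on this ring's timeline */
	dma_fence_init(fence, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,	/* one context per ring */
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	/* publish into the slot array; the mask wraps seq into 2 * num_hw_submission slots */
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_assign_pointer(*ptr, dma_fence_get(fence));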
177 * amdgpu_fence_emit_polling - emit a fence on the requested ring
179 * @ring: ring the fence is associated with
183 * Emits a fence command on the requested ring (all asics).
187 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s, in amdgpu_fence_emit_polling() argument
196 seq = ++ring->fence_drv.sync_seq; in amdgpu_fence_emit_polling()
197 r = amdgpu_fence_wait_polling(ring, in amdgpu_fence_emit_polling()
198 seq - ring->fence_drv.num_fences_mask, in amdgpu_fence_emit_polling()
203 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, in amdgpu_fence_emit_polling()
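The polling variant throttles itself instead of relying on fence interrupts: before emitting seqno N it busy-waits until N - num_fences_mask has signaled, so a slot in the fences array can never be overwritten while still live. A hedged sketch of that ordering:

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		return -ETIMEDOUT;	/* oldest in-flight seqno never drained */

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);	/* no interrupt flag: the caller polls */
	*s = seq;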
214 * @ring: pointer to struct amdgpu_ring
218 static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring) in amdgpu_fence_schedule_fallback() argument
220 mod_timer(&ring->fence_drv.fallback_timer, in amdgpu_fence_schedule_fallback()
227 * @ring: pointer to struct amdgpu_ring
235 bool amdgpu_fence_process(struct amdgpu_ring *ring) in amdgpu_fence_process() argument
237 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_process()
238 struct amdgpu_device *adev = ring->adev; in amdgpu_fence_process()
242 last_seq = atomic_read(&ring->fence_drv.last_seq); in amdgpu_fence_process()
243 seq = amdgpu_fence_read(ring); in amdgpu_fence_process()
247 if (timer_delete(&ring->fence_drv.fallback_timer) && in amdgpu_fence_process()
248 seq != ring->fence_drv.sync_seq) in amdgpu_fence_process()
249 amdgpu_fence_schedule_fallback(ring); in amdgpu_fence_process()
273 * wptr was. This is required for re-emitting the ring state for in amdgpu_fence_process()
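Processing can run concurrently from the fence interrupt and the fallback timer, which is why last_seq is atomic: whoever wins the cmpxchg owns the newly signaled window and signals each slot in it exactly once. A hedged sketch of that core, consistent with the matches above:

	do {
		last_seq = atomic_read(&drv->last_seq);
		seq = amdgpu_fence_read(ring);
	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (unlikely(seq == last_seq))
		return false;		/* another path already caught up */

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;
	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;	/* wrap into the slot array */
		ptr = &drv->fences[last_seq];

		/* exactly one thread signals a given slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);
		if (!fence)
			continue;

		dma_fence_signal(fence);
		dma_fence_put(fence);
	} while (last_seq != seq);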
290 * @t: timer context used to obtain the pointer to ring structure
296 struct amdgpu_ring *ring = timer_container_of(ring, t, in amdgpu_fence_fallback() local
299 if (amdgpu_fence_process(ring)) in amdgpu_fence_fallback()
300 dev_warn(ring->adev->dev, in amdgpu_fence_fallback()
301 "Fence fallback timer expired on ring %s\n", in amdgpu_fence_fallback()
302 ring->name); in amdgpu_fence_fallback()
308 * @ring: ring the fence is associated with
310 * Wait for all fences on the requested ring to signal (all asics).
313 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) in amdgpu_fence_wait_empty() argument
315 uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq); in amdgpu_fence_wait_empty()
322 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_wait_empty()
339 * @ring: ring the fence is associated with
343 * Busy-wait for the given sequence number to signal on the requested ring (all asics).
346 signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring, in amdgpu_fence_wait_polling() argument
351 while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) { in amdgpu_fence_wait_polling()
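The cast in the loop condition is serial-number arithmetic: subtract in u32, reinterpret as s32, and "wait_seq is still ahead" stays correct across the 2^32 seqno wrap as long as the two values are within 2^31 of each other. A standalone userspace demo (hypothetical, not driver code):

#include <assert.h>
#include <stdint.h>

static int seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;	/* same idiom as the polling loop */
}

int main(void)
{
	assert(seq_after(10, 5));			/* plain case */
	assert(seq_after(0x00000002, 0xfffffffe));	/* across the wrap */
	assert(!seq_after(0xfffffffe, 0x00000002));
	return 0;
}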
360 * @ring: ring the fence is associated with
362 * Get the number of fences emitted on the requested ring (all asics).
363 * Returns the number of emitted fences on the ring. Used by the
364 * dynpm code to track ring activity.
366 unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring) in amdgpu_fence_count_emitted() argument
370 /* We are not protected by ring lock when reading the last sequence in amdgpu_fence_count_emitted()
374 emitted -= atomic_read(&ring->fence_drv.last_seq); in amdgpu_fence_count_emitted()
375 emitted += READ_ONCE(ring->fence_drv.sync_seq); in amdgpu_fence_count_emitted()
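The two lines above compute sync_seq - last_seq modulo 2^32, so the in-flight count stays right even when the 32-bit seqno rolls over between the two values. A standalone demo with hypothetical seqnos:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t last_seq = 0xfffffff0;	/* last seqno the GPU signaled */
	uint32_t sync_seq = 0x00000004;	/* last seqno the driver emitted */
	uint32_t emitted = 0;

	emitted -= last_seq;		/* unsigned wrap does the work */
	emitted += sync_seq;
	assert(emitted == 0x14);	/* 20 fences still in flight */
	return 0;
}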
381 * @ring: ring the fence is associated with
386 u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring) in amdgpu_fence_last_unsignaled_time_us() argument
388 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_last_unsignaled_time_us()
392 last_seq = atomic_read(&ring->fence_drv.last_seq); in amdgpu_fence_last_unsignaled_time_us()
393 sync_seq = READ_ONCE(ring->fence_drv.sync_seq); in amdgpu_fence_last_unsignaled_time_us()
409 * @ring: ring the fence is associated with
417 void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp) in amdgpu_fence_update_start_timestamp() argument
419 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_update_start_timestamp()
432 * ready for use on the requested ring.
434 * @ring: ring to start the fence driver on
435 * @irq_src: interrupt source to use for this ring
436 * @irq_type: interrupt type to use for this ring
443 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, in amdgpu_fence_driver_start_ring() argument
447 struct amdgpu_device *adev = ring->adev; in amdgpu_fence_driver_start_ring()
450 if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) { in amdgpu_fence_driver_start_ring()
451 ring->fence_drv.cpu_addr = ring->fence_cpu_addr; in amdgpu_fence_driver_start_ring()
452 ring->fence_drv.gpu_addr = ring->fence_gpu_addr; in amdgpu_fence_driver_start_ring()
456 ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index; in amdgpu_fence_driver_start_ring()
457 ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index; in amdgpu_fence_driver_start_ring()
459 amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); in amdgpu_fence_driver_start_ring()
461 ring->fence_drv.irq_src = irq_src; in amdgpu_fence_driver_start_ring()
462 ring->fence_drv.irq_type = irq_type; in amdgpu_fence_driver_start_ring()
463 ring->fence_drv.initialized = true; in amdgpu_fence_driver_start_ring()
465 DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n", in amdgpu_fence_driver_start_ring()
466 ring->name, ring->fence_drv.gpu_addr); in amdgpu_fence_driver_start_ring()
472 * for the requested ring.
474 * @ring: ring to init the fence driver on
476 * Init the fence driver for the requested ring (all asics).
479 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) in amdgpu_fence_driver_init_ring() argument
481 struct amdgpu_device *adev = ring->adev; in amdgpu_fence_driver_init_ring()
486 if (!is_power_of_2(ring->num_hw_submission)) in amdgpu_fence_driver_init_ring()
489 ring->fence_drv.cpu_addr = NULL; in amdgpu_fence_driver_init_ring()
490 ring->fence_drv.gpu_addr = 0; in amdgpu_fence_driver_init_ring()
491 ring->fence_drv.sync_seq = 0; in amdgpu_fence_driver_init_ring()
492 atomic_set(&ring->fence_drv.last_seq, 0); in amdgpu_fence_driver_init_ring()
493 ring->fence_drv.initialized = false; in amdgpu_fence_driver_init_ring()
495 timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0); in amdgpu_fence_driver_init_ring()
497 ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1; in amdgpu_fence_driver_init_ring()
498 spin_lock_init(&ring->fence_drv.lock); in amdgpu_fence_driver_init_ring()
499 ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *), in amdgpu_fence_driver_init_ring()
502 if (!ring->fence_drv.fences) in amdgpu_fence_driver_init_ring()
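The is_power_of_2() check above is what makes the num_fences_mask trick sound: if num_hw_submission is 2^k, so is 2 * num_hw_submission, and seq & num_fences_mask is an exact modulo over the kcalloc'd slot array. A standalone demo with a hypothetical submission count:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t num_hw_submission = 128;		/* must be a power of two */
	uint32_t mask = num_hw_submission * 2 - 1;	/* 0xff */
	uint32_t seq;

	for (seq = 0; seq < 4096; seq++)
		assert((seq & mask) == (seq % (num_hw_submission * 2)));
	return 0;
}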
529 * @ring: ring to be checked
536 static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring) in amdgpu_fence_need_ring_interrupt_restore() argument
538 struct amdgpu_device *adev = ring->adev; in amdgpu_fence_need_ring_interrupt_restore()
541 switch (ring->funcs->type) { in amdgpu_fence_need_ring_interrupt_restore()
574 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_hw_fini() local
576 if (!ring || !ring->fence_drv.initialized) in amdgpu_fence_driver_hw_fini()
581 r = amdgpu_fence_wait_empty(ring); in amdgpu_fence_driver_hw_fini()
586 amdgpu_fence_driver_force_completion(ring); in amdgpu_fence_driver_hw_fini()
589 ring->fence_drv.irq_src && in amdgpu_fence_driver_hw_fini()
590 amdgpu_fence_need_ring_interrupt_restore(ring)) in amdgpu_fence_driver_hw_fini()
591 amdgpu_irq_put(adev, ring->fence_drv.irq_src, in amdgpu_fence_driver_hw_fini()
592 ring->fence_drv.irq_type); in amdgpu_fence_driver_hw_fini()
594 timer_delete_sync(&ring->fence_drv.fallback_timer); in amdgpu_fence_driver_hw_fini()
604 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_isr_toggle() local
606 if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src) in amdgpu_fence_driver_isr_toggle()
621 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_sw_fini() local
623 if (!ring || !ring->fence_drv.initialized) in amdgpu_fence_driver_sw_fini()
632 if (ring->sched.ops) in amdgpu_fence_driver_sw_fini()
633 drm_sched_fini(&ring->sched); in amdgpu_fence_driver_sw_fini()
635 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) in amdgpu_fence_driver_sw_fini()
636 dma_fence_put(ring->fence_drv.fences[j]); in amdgpu_fence_driver_sw_fini()
637 kfree(ring->fence_drv.fences); in amdgpu_fence_driver_sw_fini()
638 ring->fence_drv.fences = NULL; in amdgpu_fence_driver_sw_fini()
639 ring->fence_drv.initialized = false; in amdgpu_fence_driver_sw_fini()
660 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_hw_init() local
662 if (!ring || !ring->fence_drv.initialized) in amdgpu_fence_driver_hw_init()
666 if (ring->fence_drv.irq_src && in amdgpu_fence_driver_hw_init()
667 amdgpu_fence_need_ring_interrupt_restore(ring)) in amdgpu_fence_driver_hw_init()
668 amdgpu_irq_get(adev, ring->fence_drv.irq_src, in amdgpu_fence_driver_hw_init()
669 ring->fence_drv.irq_type); in amdgpu_fence_driver_hw_init()
674 * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
676 * @ring: ring whose job-embedded fences are to be cleared
679 void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring) in amdgpu_fence_driver_clear_job_fences() argument
684 for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) { in amdgpu_fence_driver_clear_job_fences()
685 ptr = &ring->fence_drv.fences[i]; in amdgpu_fence_driver_clear_job_fences()
705 * @ring: the ring which contains the fences
708 * Set an error code on all the fences pending on the ring.
710 void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error) in amdgpu_fence_driver_set_error() argument
712 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_driver_set_error()
728 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
730 * @ring: ring whose latest fence should be force-signaled
733 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring) in amdgpu_fence_driver_force_completion() argument
735 amdgpu_fence_driver_set_error(ring, -ECANCELED); in amdgpu_fence_driver_force_completion()
736 amdgpu_fence_write(ring, ring->fence_drv.sync_seq); in amdgpu_fence_driver_force_completion()
737 amdgpu_fence_process(ring); in amdgpu_fence_driver_force_completion()
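The order of the three calls above matters: the error must be latched before processing signals anything, or a waiter could observe a signaled fence without the -ECANCELED status. A commented restatement (hedged, matching the sequence shown):

	amdgpu_fence_driver_set_error(ring, -ECANCELED);	/* waiters will see -ECANCELED */
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);	/* pretend hw reached the newest seq */
	amdgpu_fence_process(ring);				/* signal everything still pending */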
747 * driver will save the ring contents which are not associated with the guilty
753 * which data needs to be saved out of the queue's ring buffer.
759 * @fence: fence to force-signal on its ring
765 amdgpu_fence_write(fence->ring, fence->seq); in amdgpu_fence_driver_guilty_force_completion()
766 amdgpu_fence_process(fence->ring); in amdgpu_fence_driver_guilty_force_completion()
773 am_fence->wptr = am_fence->ring->wptr; in amdgpu_fence_save_wptr()
776 static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring, in amdgpu_ring_backup_unprocessed_command() argument
779 unsigned int first_idx = start_wptr & ring->buf_mask; in amdgpu_ring_backup_unprocessed_command()
780 unsigned int last_idx = end_wptr & ring->buf_mask; in amdgpu_ring_backup_unprocessed_command()
783 /* Backup the contents of the ring buffer. */ in amdgpu_ring_backup_unprocessed_command()
784 for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask) in amdgpu_ring_backup_unprocessed_command()
785 ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i]; in amdgpu_ring_backup_unprocessed_command()
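The copy walks a power-of-two ring with an index mask instead of bounds checks, so a span that crosses the end of the buffer needs no special case. A standalone demo of the wrap-around walk (hypothetical sizes):

#include <assert.h>
#include <stdint.h>

#define RING_SIZE 8			/* must be a power of two */
#define BUF_MASK  (RING_SIZE - 1)

int main(void)
{
	uint32_t ring[RING_SIZE] = {0, 1, 2, 3, 4, 5, 6, 7};
	uint32_t backup[RING_SIZE];
	unsigned int start_wptr = 6, end_wptr = 10;	/* crosses the wrap */
	unsigned int i, n = 0;

	for (i = start_wptr & BUF_MASK; i != (end_wptr & BUF_MASK);
	     ++i, i &= BUF_MASK)
		backup[n++] = ring[i];

	assert(n == 4);
	assert(backup[0] == 6 && backup[3] == 1);
	return 0;
}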
788 void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring, in amdgpu_ring_backup_unprocessed_commands() argument
796 seqno = amdgpu_fence_read(ring); in amdgpu_ring_backup_unprocessed_commands()
797 wptr = ring->fence_drv.signalled_wptr; in amdgpu_ring_backup_unprocessed_commands()
798 ring->ring_backup_entries_to_copy = 0; in amdgpu_ring_backup_unprocessed_commands()
800 for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) { in amdgpu_ring_backup_unprocessed_commands()
801 ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask]; in amdgpu_ring_backup_unprocessed_commands()
808 /* save everything if the ring is not guilty, otherwise in amdgpu_ring_backup_unprocessed_commands()
812 amdgpu_ring_backup_unprocessed_command(ring, wptr, in amdgpu_ring_backup_unprocessed_commands()
831 return (const char *)to_amdgpu_fence(f)->ring->name; in amdgpu_fence_get_timeline_name()
851 if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer)) in amdgpu_fence_enable_signaling()
852 amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring); in amdgpu_fence_enable_signaling()
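The two fragments above are dma_fence_ops callbacks. A hedged sketch of the table they presumably sit in; the release hook is an assumption, the other entries follow from the matched function names:

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name   = amdgpu_fence_get_driver_name,	/* assumed counterpart */
	.get_timeline_name = amdgpu_fence_get_timeline_name,	/* returns ring->name */
	.enable_signaling  = amdgpu_fence_enable_signaling,	/* arms the fallback timer */
	.release           = amdgpu_fence_release,		/* assumed: RCU-deferred free */
};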
954 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_debugfs_fence_info_show() local
956 if (!ring || !ring->fence_drv.initialized) in amdgpu_debugfs_fence_info_show()
959 amdgpu_fence_process(ring); in amdgpu_debugfs_fence_info_show()
961 seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name); in amdgpu_debugfs_fence_info_show()
963 atomic_read(&ring->fence_drv.last_seq)); in amdgpu_debugfs_fence_info_show()
965 ring->fence_drv.sync_seq); in amdgpu_debugfs_fence_info_show()
967 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX || in amdgpu_debugfs_fence_info_show()
968 ring->funcs->type == AMDGPU_RING_TYPE_SDMA) { in amdgpu_debugfs_fence_info_show()
970 le32_to_cpu(*ring->trail_fence_cpu_addr)); in amdgpu_debugfs_fence_info_show()
972 ring->trail_seq); in amdgpu_debugfs_fence_info_show()
975 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX) in amdgpu_debugfs_fence_info_show()
980 le32_to_cpu(*(ring->fence_drv.cpu_addr + 2))); in amdgpu_debugfs_fence_info_show()
983 le32_to_cpu(*(ring->fence_drv.cpu_addr + 4))); in amdgpu_debugfs_fence_info_show()
986 le32_to_cpu(*(ring->fence_drv.cpu_addr + 6))); in amdgpu_debugfs_fence_info_show()