Lines Matching full:ring
58 * @ring: ring the fence is associated with
63 static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq) in amdgpu_fence_write() argument
65 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_write()
74 * @ring: ring the fence is associated with
79 static u32 amdgpu_fence_read(struct amdgpu_ring *ring) in amdgpu_fence_read() argument
81 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_read()
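These two helpers are the whole CPU/GPU handshake: the GPU writes the completed sequence number into a shared slot, and the CPU reads it back. A minimal sketch of the pattern, using a hypothetical simplified struct (the real driver also falls back to the last_seq atomic when no CPU mapping exists):

    /* Hypothetical simplified slot, not the real amdgpu_fence_driver. */
    struct seqno_slot {
        volatile u32 *cpu_addr;   /* CPU view of the fence location */
    };

    static void seqno_write(struct seqno_slot *s, u32 seq)
    {
        if (s->cpu_addr)
            *s->cpu_addr = cpu_to_le32(seq);
    }

    static u32 seqno_read(struct seqno_slot *s)
    {
        return s->cpu_addr ? le32_to_cpu(*s->cpu_addr) : 0;
    }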
93 * amdgpu_fence_emit - emit a fence on the requested ring
95 * @ring: ring the fence is associated with
99 * Emits a fence command on the requested ring (all asics).
102 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af, in amdgpu_fence_emit() argument
105 struct amdgpu_device *adev = ring->adev; in amdgpu_fence_emit()
112 af->ring = ring; in amdgpu_fence_emit()
114 seq = ++ring->fence_drv.sync_seq; in amdgpu_fence_emit()
116 &ring->fence_drv.lock, in amdgpu_fence_emit()
117 adev->fence_context + ring->idx, seq); in amdgpu_fence_emit()
119 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, in amdgpu_fence_emit()
123 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_emit()
142 * emitting the fence would mess up the hardware ring buffer. in amdgpu_fence_emit()
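The slot lookup at source line 123 indexes a power-of-two table with seq & num_fences_mask, and the elided body (hinted at by the comment fragment at line 142) must retire whatever fence previously occupied that slot before reusing it, or the hardware ring buffer would be overrun. A self-contained sketch of that slot-recycling rule, with a hypothetical helper name on the real dma_fence API:

    /* Hypothetical helper: recycle fences[seq & mask] for a new fence.
     * The previous occupant is exactly (mask + 1) submissions old, so
     * waiting on it throttles the producer to the table size.
     */
    static long recycle_slot(struct dma_fence **fences, u32 mask,
                             u32 seq, struct dma_fence *new_fence)
    {
        struct dma_fence **ptr = &fences[seq & mask];
        struct dma_fence *old = *ptr;

        if (old) {
            long r = dma_fence_wait(old, false);
            if (r < 0)
                return r;
            dma_fence_put(old);
        }
        *ptr = dma_fence_get(new_fence);
        return 0;
    }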
150 * amdgpu_fence_emit_polling - emit a fence on the requested ring in amdgpu_fence_emit_polling()
152 * @ring: ring the fence is associated with
156 * Emits a fence command on the requested ring (all asics).
160 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s, in amdgpu_fence_emit_polling() argument
169 seq = ++ring->fence_drv.sync_seq; in amdgpu_fence_emit_polling()
170 r = amdgpu_fence_wait_polling(ring, in amdgpu_fence_emit_polling()
171 seq - ring->fence_drv.num_fences_mask, in amdgpu_fence_emit_polling()
176 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, in amdgpu_fence_emit_polling()
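amdgpu_fence_emit_polling applies the same throttle without sleeping: before emitting seq it busy-waits until fence seq - num_fences_mask has retired, so at most num_fences_mask fences are ever outstanding. Worked numbers (hypothetical sizes):

    u32 num_hw_submission = 128;            /* hypothetical ring depth   */
    u32 mask = num_hw_submission * 2 - 1;   /* 255, as set up at init    */
    u32 seq = 1000;                         /* fence about to be emitted */
    u32 poll_until = seq - mask;            /* 745 must signal first     */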
187 * @ring: pointer to struct amdgpu_ring
191 static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring) in amdgpu_fence_schedule_fallback() argument
193 mod_timer(&ring->fence_drv.fallback_timer, in amdgpu_fence_schedule_fallback()
200 * @ring: pointer to struct amdgpu_ring
208 bool amdgpu_fence_process(struct amdgpu_ring *ring) in amdgpu_fence_process() argument
210 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_process()
211 struct amdgpu_device *adev = ring->adev; in amdgpu_fence_process()
215 last_seq = atomic_read(&ring->fence_drv.last_seq); in amdgpu_fence_process()
216 seq = amdgpu_fence_read(ring); in amdgpu_fence_process()
220 if (timer_delete(&ring->fence_drv.fallback_timer) && in amdgpu_fence_process()
221 seq != ring->fence_drv.sync_seq) in amdgpu_fence_process()
222 amdgpu_fence_schedule_fallback(ring); in amdgpu_fence_process()
246 * wptr was. This is required for re-emitting the ring state for in amdgpu_fence_process()
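amdgpu_fence_process reconciles the two counters read at lines 215-216: every seqno between the last processed value and the value just read back has completed, and the fence parked in the matching slot can be signaled. A simplified sketch of that catch-up walk, assuming local fences, mask, last_seq, and seq (the real code also retries the counter update with an atomic cmpxchg):

    while (last_seq != seq) {
        struct dma_fence *fence;

        ++last_seq;
        fence = fences[last_seq & mask];
        if (!fence)
            continue;

        fences[last_seq & mask] = NULL;
        dma_fence_signal(fence);   /* wakes waiters, sets timestamp */
        dma_fence_put(fence);      /* drop the slot's reference     */
    }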
262 * @t: timer context used to obtain the pointer to the ring structure
268 struct amdgpu_ring *ring = timer_container_of(ring, t, in amdgpu_fence_fallback() local
271 if (amdgpu_fence_process(ring)) in amdgpu_fence_fallback()
272 dev_warn(ring->adev->dev, in amdgpu_fence_fallback()
273 "Fence fallback timer expired on ring %s\n", in amdgpu_fence_fallback()
274 ring->name); in amdgpu_fence_fallback()
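The fallback timer exists because fence interrupts can be lost; the handler simply re-runs amdgpu_fence_process and warns if it actually found work. The generic shape of the pattern as a minimal sketch, with a hypothetical driver struct and an arbitrary half-second period:

    struct my_drv {
        struct timer_list fallback_timer;
    };

    static void my_fallback(struct timer_list *t)
    {
        struct my_drv *d = timer_container_of(d, t, fallback_timer);

        /* re-scan completions the interrupt may have missed */
    }

    static void my_init(struct my_drv *d)
    {
        timer_setup(&d->fallback_timer, my_fallback, 0);
        mod_timer(&d->fallback_timer, jiffies + HZ / 2);
    }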
280 * @ring: ring the fence is associated with
282 * Wait for all fences on the requested ring to signal (all asics).
285 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) in amdgpu_fence_wait_empty() argument
287 uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq); in amdgpu_fence_wait_empty()
294 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_wait_empty()
311 * @ring: ring the fence is associated with
315 * Wait for all fences on the requested ring to signal (all asics).
318 signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring, in amdgpu_fence_wait_polling() argument
323 while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) { in amdgpu_fence_wait_polling()
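The cast on source line 323 is the classic serial-number comparison: 32-bit seqnos wrap, so "is wait_seq still ahead of the hardware?" must be answered with a signed difference, never with a plain less-than. A small demonstration:

    u32 hw_seq   = 0xfffffff0;   /* hardware counter about to wrap */
    u32 wait_seq = 0x00000010;   /* logically 0x20 steps ahead     */

    /* naive compare gets it backwards: wait_seq < hw_seq numerically */
    bool ahead = (int32_t)(wait_seq - hw_seq) > 0;  /* true: keep polling */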
332 * @ring: ring the fence is associated with
334 * Get the number of fences emitted on the requested ring (all asics).
335 * Returns the number of emitted fences on the ring. Used by the
336 * dynpm code to track ring activity.
338 unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring) in amdgpu_fence_count_emitted() argument
342 /* We are not protected by ring lock when reading the last sequence in amdgpu_fence_count_emitted()
346 emitted -= atomic_read(&ring->fence_drv.last_seq); in amdgpu_fence_count_emitted()
347 emitted += READ_ONCE(ring->fence_drv.sync_seq); in amdgpu_fence_count_emitted()
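The two lines above compute sync_seq - last_seq in modular arithmetic; the elided initializer in the full source biases emitted by 2^32 so the unsigned subtraction cannot underflow. Worked example with hypothetical counter values straddling a wrap:

    uint64_t emitted = 0x100000000ull;  /* bias: '-=' can't underflow  */
    uint32_t last_seq = 0xfffffffe;     /* GPU just before the wrap    */
    uint32_t sync_seq = 0x00000003;     /* CPU already emitted past it */

    emitted -= last_seq;  /* 0x100000000 - 0xfffffffe = 2              */
    emitted += sync_seq;  /* 2 + 3 = 5 fences currently in flight      */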
353 * @ring: ring the fence is associated with
358 u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring) in amdgpu_fence_last_unsignaled_time_us() argument
360 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_last_unsignaled_time_us()
364 last_seq = atomic_read(&ring->fence_drv.last_seq); in amdgpu_fence_last_unsignaled_time_us()
365 sync_seq = READ_ONCE(ring->fence_drv.sync_seq); in amdgpu_fence_last_unsignaled_time_us()
381 * @ring: ring the fence is associated with
389 void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp) in amdgpu_fence_update_start_timestamp() argument
391 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_update_start_timestamp()
404 * ready for use on the requested ring.
406 * @ring: ring to start the fence driver on
407 * @irq_src: interrupt source to use for this ring
408 * @irq_type: interrupt type to use for this ring
415 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, in amdgpu_fence_driver_start_ring() argument
419 struct amdgpu_device *adev = ring->adev; in amdgpu_fence_driver_start_ring()
422 if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) { in amdgpu_fence_driver_start_ring()
423 ring->fence_drv.cpu_addr = ring->fence_cpu_addr; in amdgpu_fence_driver_start_ring()
424 ring->fence_drv.gpu_addr = ring->fence_gpu_addr; in amdgpu_fence_driver_start_ring()
428 ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index; in amdgpu_fence_driver_start_ring()
429 ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index; in amdgpu_fence_driver_start_ring()
431 amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); in amdgpu_fence_driver_start_ring()
433 ring->fence_drv.irq_src = irq_src; in amdgpu_fence_driver_start_ring()
434 ring->fence_drv.irq_type = irq_type; in amdgpu_fence_driver_start_ring()
435 ring->fence_drv.initialized = true; in amdgpu_fence_driver_start_ring()
437 DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n", in amdgpu_fence_driver_start_ring()
438 ring->name, ring->fence_drv.gpu_addr); in amdgpu_fence_driver_start_ring()
444 * for the requested ring.
446 * @ring: ring to init the fence driver on
448 * Init the fence driver for the requested ring (all asics).
451 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) in amdgpu_fence_driver_init_ring() argument
453 struct amdgpu_device *adev = ring->adev; in amdgpu_fence_driver_init_ring()
458 if (!is_power_of_2(ring->num_hw_submission)) in amdgpu_fence_driver_init_ring()
461 ring->fence_drv.cpu_addr = NULL; in amdgpu_fence_driver_init_ring()
462 ring->fence_drv.gpu_addr = 0; in amdgpu_fence_driver_init_ring()
463 ring->fence_drv.sync_seq = 0; in amdgpu_fence_driver_init_ring()
464 atomic_set(&ring->fence_drv.last_seq, 0); in amdgpu_fence_driver_init_ring()
465 ring->fence_drv.initialized = false; in amdgpu_fence_driver_init_ring()
467 timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0); in amdgpu_fence_driver_init_ring()
469 ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1; in amdgpu_fence_driver_init_ring()
470 spin_lock_init(&ring->fence_drv.lock); in amdgpu_fence_driver_init_ring()
471 ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *), in amdgpu_fence_driver_init_ring()
474 if (!ring->fence_drv.fences) in amdgpu_fence_driver_init_ring()
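The is_power_of_2 check at source line 458 is load-bearing: num_fences_mask = 2*n - 1 only acts as a modulus when 2*n is a power of two. A quick counterexample with hypothetical sizes:

    unsigned int n = 96;              /* NOT a power of two            */
    unsigned int mask = 2 * n - 1;    /* 191 = 0b10111111, bit 6 clear */

    /* 64 & 191 == 0: seq 64 collides with seq 0, and slots 64..127 of
     * the 192-entry table are never addressed at all.  With n = 128,
     * mask = 255 and seq & mask == seq % 256 exactly.
     */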
501 * @ring: ring to be checked
508 static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring) in amdgpu_fence_need_ring_interrupt_restore() argument
510 struct amdgpu_device *adev = ring->adev; in amdgpu_fence_need_ring_interrupt_restore()
513 switch (ring->funcs->type) { in amdgpu_fence_need_ring_interrupt_restore()
546 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_hw_fini() local
548 if (!ring || !ring->fence_drv.initialized) in amdgpu_fence_driver_hw_fini()
553 r = amdgpu_fence_wait_empty(ring); in amdgpu_fence_driver_hw_fini()
558 amdgpu_fence_driver_force_completion(ring); in amdgpu_fence_driver_hw_fini()
561 ring->fence_drv.irq_src && in amdgpu_fence_driver_hw_fini()
562 amdgpu_fence_need_ring_interrupt_restore(ring)) in amdgpu_fence_driver_hw_fini()
563 amdgpu_irq_put(adev, ring->fence_drv.irq_src, in amdgpu_fence_driver_hw_fini()
564 ring->fence_drv.irq_type); in amdgpu_fence_driver_hw_fini()
566 timer_delete_sync(&ring->fence_drv.fallback_timer); in amdgpu_fence_driver_hw_fini()
576 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_isr_toggle() local
578 if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src) in amdgpu_fence_driver_isr_toggle()
593 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_sw_fini() local
595 if (!ring || !ring->fence_drv.initialized) in amdgpu_fence_driver_sw_fini()
604 if (ring->sched.ops) in amdgpu_fence_driver_sw_fini()
605 drm_sched_fini(&ring->sched); in amdgpu_fence_driver_sw_fini()
607 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) in amdgpu_fence_driver_sw_fini()
608 dma_fence_put(ring->fence_drv.fences[j]); in amdgpu_fence_driver_sw_fini()
609 kfree(ring->fence_drv.fences); in amdgpu_fence_driver_sw_fini()
610 ring->fence_drv.fences = NULL; in amdgpu_fence_driver_sw_fini()
611 ring->fence_drv.initialized = false; in amdgpu_fence_driver_sw_fini()
632 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_hw_init() local
634 if (!ring || !ring->fence_drv.initialized) in amdgpu_fence_driver_hw_init()
638 if (ring->fence_drv.irq_src && in amdgpu_fence_driver_hw_init()
639 amdgpu_fence_need_ring_interrupt_restore(ring)) in amdgpu_fence_driver_hw_init()
640 amdgpu_irq_get(adev, ring->fence_drv.irq_src, in amdgpu_fence_driver_hw_init()
641 ring->fence_drv.irq_type); in amdgpu_fence_driver_hw_init()
647 * @ring: the ring which contains the fences
650 * Set an error code to all the fences pending on the ring.
652 void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error) in amdgpu_fence_driver_set_error() argument
654 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_driver_set_error()
670 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
672 * @ring: ring whose latest fence to signal
675 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring) in amdgpu_fence_driver_force_completion() argument
677 amdgpu_fence_driver_set_error(ring, -ECANCELED); in amdgpu_fence_driver_force_completion()
678 amdgpu_fence_write(ring, ring->fence_drv.sync_seq); in amdgpu_fence_driver_force_completion()
679 amdgpu_fence_process(ring); in amdgpu_fence_driver_force_completion()
689 * driver will save the ring contents which are not associated with the guilty
695 * which data needs to be saved out of the queue's ring buffer.
701 * @af: fence of the ring to signal
709 struct amdgpu_ring *ring = af->ring; in amdgpu_fence_driver_guilty_force_completion() local
713 last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask; in amdgpu_fence_driver_guilty_force_completion()
714 seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask; in amdgpu_fence_driver_guilty_force_completion()
717 spin_lock_irqsave(&ring->fence_drv.lock, flags); in amdgpu_fence_driver_guilty_force_completion()
720 last_seq &= ring->fence_drv.num_fences_mask; in amdgpu_fence_driver_guilty_force_completion()
722 ptr = &ring->fence_drv.fences[last_seq]; in amdgpu_fence_driver_guilty_force_completion()
736 spin_unlock_irqrestore(&ring->fence_drv.lock, flags); in amdgpu_fence_driver_guilty_force_completion()
738 amdgpu_fence_write(ring, (u32)af->base.seqno); in amdgpu_fence_driver_guilty_force_completion()
739 amdgpu_fence_process(ring); in amdgpu_fence_driver_guilty_force_completion()
744 af->wptr = af->ring->wptr; in amdgpu_fence_save_wptr()
747 static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring, in amdgpu_ring_backup_unprocessed_command() argument
750 unsigned int first_idx = start_wptr & ring->buf_mask; in amdgpu_ring_backup_unprocessed_command()
751 unsigned int last_idx = end_wptr & ring->buf_mask; in amdgpu_ring_backup_unprocessed_command()
754 /* Backup the contents of the ring buffer. */ in amdgpu_ring_backup_unprocessed_command()
755 for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask) in amdgpu_ring_backup_unprocessed_command()
756 ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i]; in amdgpu_ring_backup_unprocessed_command()
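The copy loop above walks the ring circularly: buf_mask is size - 1 for a power-of-two ring buffer, so incrementing and re-masking the index wraps it past the end. The same idea as a standalone sketch (hypothetical helper, not the amdgpu function):

    static unsigned int ring_copy_range(const u32 *ring, u32 buf_mask,
                                        u32 *out, u64 start_wptr,
                                        u64 end_wptr)
    {
        unsigned int i = start_wptr & buf_mask;
        unsigned int last = end_wptr & buf_mask;
        unsigned int n = 0;

        while (i != last) {
            out[n++] = ring[i];
            i = (i + 1) & buf_mask;   /* wrap past the buffer end */
        }
        return n;
    }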
759 void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring, in amdgpu_ring_backup_unprocessed_commands() argument
768 last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask; in amdgpu_ring_backup_unprocessed_commands()
769 seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask; in amdgpu_ring_backup_unprocessed_commands()
770 wptr = ring->fence_drv.signalled_wptr; in amdgpu_ring_backup_unprocessed_commands()
771 ring->ring_backup_entries_to_copy = 0; in amdgpu_ring_backup_unprocessed_commands()
775 last_seq &= ring->fence_drv.num_fences_mask; in amdgpu_ring_backup_unprocessed_commands()
777 ptr = &ring->fence_drv.fences[last_seq]; in amdgpu_ring_backup_unprocessed_commands()
784 /* save everything if the ring is not guilty, otherwise in amdgpu_ring_backup_unprocessed_commands()
788 amdgpu_ring_backup_unprocessed_command(ring, wptr, in amdgpu_ring_backup_unprocessed_commands()
807 return (const char *)to_amdgpu_fence(f)->ring->name; in amdgpu_fence_get_timeline_name()
820 if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer)) in amdgpu_fence_enable_signaling()
821 amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring); in amdgpu_fence_enable_signaling()
871 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_debugfs_fence_info_show() local
873 if (!ring || !ring->fence_drv.initialized) in amdgpu_debugfs_fence_info_show()
876 amdgpu_fence_process(ring); in amdgpu_debugfs_fence_info_show()
878 seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name); in amdgpu_debugfs_fence_info_show()
880 atomic_read(&ring->fence_drv.last_seq)); in amdgpu_debugfs_fence_info_show()
882 ring->fence_drv.sync_seq); in amdgpu_debugfs_fence_info_show()
884 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX || in amdgpu_debugfs_fence_info_show()
885 ring->funcs->type == AMDGPU_RING_TYPE_SDMA) { in amdgpu_debugfs_fence_info_show()
887 le32_to_cpu(*ring->trail_fence_cpu_addr)); in amdgpu_debugfs_fence_info_show()
889 ring->trail_seq); in amdgpu_debugfs_fence_info_show()
892 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX) in amdgpu_debugfs_fence_info_show()
897 le32_to_cpu(*(ring->fence_drv.cpu_addr + 2))); in amdgpu_debugfs_fence_info_show()
900 le32_to_cpu(*(ring->fence_drv.cpu_addr + 4))); in amdgpu_debugfs_fence_info_show()
903 le32_to_cpu(*(ring->fence_drv.cpu_addr + 6))); in amdgpu_debugfs_fence_info_show()