Lines Matching full:ring
52 * are no longer in use by the associated ring on the GPU and
63 * @ring: ring index the fence is associated with
67 static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) in radeon_fence_write() argument
69 struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; in radeon_fence_write()
83 * @ring: ring index the fence is associated with
88 static u32 radeon_fence_read(struct radeon_device *rdev, int ring) in radeon_fence_read() argument
90 struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; in radeon_fence_read()
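
The read/write helpers above both start by looking up the per-ring fence driver. A fence value lives in one of two backing stores: CPU-visible writeback memory when it is usable, or a GPU scratch register as a fallback. Below is a minimal user-space sketch of that selection; fence_drv_model, wb_enabled and friends are made-up stand-ins, not the kernel types.

    #include <stdint.h>
    #include <stdio.h>

    struct fence_drv_model {
        int       wb_enabled;   /* writeback memory usable? */
        uint32_t *cpu_addr;     /* CPU mapping of the fence slot */
        uint32_t  scratch_reg;  /* fallback register value (modeled) */
    };

    static uint32_t fence_read(struct fence_drv_model *drv)
    {
        if (drv->wb_enabled && drv->cpu_addr)
            return *drv->cpu_addr;   /* fast path: plain memory read */
        return drv->scratch_reg;     /* fallback: register read */
    }

    int main(void)
    {
        uint32_t slot = 42;
        struct fence_drv_model drv = { 1, &slot, 0 };

        printf("seq = %u\n", fence_read(&drv));
        return 0;
    }
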
108 * @ring: ring index we should work with
112 static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring) in radeon_fence_schedule_check() argument
119 &rdev->fence_drv[ring].lockup_work, in radeon_fence_schedule_check()
124 * radeon_fence_emit - emit a fence on the requested ring
128 * @ring: ring index the fence is associated with
130 * Emits a fence command on the requested ring (all asics).
135 int ring) in radeon_fence_emit() argument
139 /* we are protected by the ring emission mutex */ in radeon_fence_emit()
145 (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring]; in radeon_fence_emit()
146 (*fence)->ring = ring; in radeon_fence_emit()
150 rdev->fence_context + ring, in radeon_fence_emit()
152 radeon_fence_ring_emit(rdev, ring, *fence); in radeon_fence_emit()
153 trace_radeon_fence_emit(rdev_to_drm(rdev), ring, (*fence)->seq); in radeon_fence_emit()
154 radeon_fence_schedule_check(rdev, ring); in radeon_fence_emit()
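
The emit path above takes the next per-ring sequence number (fence_drv[ring].sync_seq[ring] doubles as the ring's own emit counter), stamps it into the fence, writes the fence packet, and arms the lockup check. A minimal sketch of just the bookkeeping, with simplified stand-in types:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_RINGS 8

    struct ring_state {                 /* stand-in for radeon_fence_driver */
        uint64_t sync_seq[NUM_RINGS];   /* sync_seq[own ring] = emit counter */
    };

    struct fence {                      /* stand-in for radeon_fence */
        uint64_t seq;
        int ring;
    };

    /* Assumes the caller holds the ring emission lock, matching the
     * "we are protected by the ring emission mutex" comment above. */
    static void fence_emit(struct ring_state *drv, struct fence *f, int ring)
    {
        f->seq = ++drv[ring].sync_seq[ring];  /* next number on this ring */
        f->ring = ring;
        /* the real driver then emits the fence packet and schedules the
         * lockup check (radeon_fence_ring_emit, radeon_fence_schedule_check) */
    }

    int main(void)
    {
        struct ring_state drv[NUM_RINGS] = { 0 };
        struct fence f;

        fence_emit(drv, &f, 0);
        printf("emitted seq %llu on ring %d\n",
               (unsigned long long)f.seq, f.ring);
        return 0;
    }
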
177 seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq); in radeon_fence_check_signaled()
180 radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring); in radeon_fence_check_signaled()
191 * @ring: ring index the fence is associated with
195 * on the ring, and the fence_queue should be woken up.
197 static bool radeon_fence_activity(struct radeon_device *rdev, int ring) in radeon_fence_activity() argument
224 last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq); in radeon_fence_activity()
226 last_emitted = rdev->fence_drv[ring].sync_seq[ring]; in radeon_fence_activity()
227 seq = radeon_fence_read(rdev, ring); in radeon_fence_activity()
251 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq); in radeon_fence_activity()
254 radeon_fence_schedule_check(rdev, ring); in radeon_fence_activity()
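
The activity loop above is the heart of fence processing: hardware writes only a 32-bit sequence number, and the driver widens it to 64 bits by reusing the upper half of the last signaled value, borrowing the upper half of the last emitted value when the 32-bit counter has wrapped. The atomic64_xchg() tail lets several CPUs race through here while keeping only the newest value. A compilable user-space model of that widening (hw_read_seq32() is a fake register read):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint_fast64_t last_seq;        /* last signaled, 64-bit */
    static uint64_t sync_seq = 0x200000005ULL;   /* last emitted, 64-bit */

    static uint32_t hw_read_seq32(void) { return 3; } /* fake: low half only */

    static int fence_activity(void)
    {
        uint64_t last = atomic_load(&last_seq), emitted, seq;
        int wake = 0;

        do {
            emitted = sync_seq;
            seq = hw_read_seq32();
            seq |= last & 0xffffffff00000000ULL;   /* reuse known upper half */
            if (seq < last) {                      /* 32-bit counter wrapped */
                seq &= 0xffffffffULL;
                seq |= emitted & 0xffffffff00000000ULL;
            }
            if (seq <= last || seq > emitted)
                break;                             /* nothing new, or bogus */
            wake = 1;
            last = seq;
            /* retry if someone else published an even newer value meanwhile */
        } while (atomic_exchange(&last_seq, seq) > seq);

        return wake;
    }

    int main(void)
    {
        atomic_store(&last_seq, 0x1fffffffeULL);   /* just before a wrap */
        fence_activity();
        printf("last_seq widened to 0x%llx\n",     /* prints 0x200000003 */
               (unsigned long long)atomic_load(&last_seq));
        return 0;
    }
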
271 int ring; in radeon_fence_check_lockup() local
276 ring = fence_drv - &rdev->fence_drv[0]; in radeon_fence_check_lockup()
280 radeon_fence_schedule_check(rdev, ring); in radeon_fence_check_lockup()
293 if (radeon_fence_activity(rdev, ring)) in radeon_fence_check_lockup()
296 else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) { in radeon_fence_check_lockup()
299 …dev_warn(rdev->dev, "GPU lockup (current fence id 0x%016llx last fence id 0x%016llx on ring %d)\n", in radeon_fence_check_lockup()
301 fence_drv->sync_seq[ring], ring); in radeon_fence_check_lockup()
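
One detail worth noting in radeon_fence_check_lockup() above: the delayed worker receives only a pointer to a single radeon_fence_driver element, and recovers the ring index by pointer subtraction against the start of the array. In miniature:

    #include <stddef.h>
    #include <stdio.h>

    struct fence_driver { int dummy; };             /* illustrative stand-in */
    struct device { struct fence_driver fence_drv[8]; };

    int main(void)
    {
        struct device dev;
        struct fence_driver *drv = &dev.fence_drv[5];  /* what the worker gets */
        ptrdiff_t ring = drv - &dev.fence_drv[0];      /* element count, not bytes */

        printf("ring %td\n", ring);                    /* prints 5 */
        return 0;
    }
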
314 * @ring: ring index the fence is associated with
319 void radeon_fence_process(struct radeon_device *rdev, int ring) in radeon_fence_process() argument
321 if (radeon_fence_activity(rdev, ring)) in radeon_fence_process()
330 * @ring: ring index the fence is associated with
340 u64 seq, unsigned int ring) in radeon_fence_seq_signaled() argument
342 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) in radeon_fence_seq_signaled()
346 radeon_fence_process(rdev, ring); in radeon_fence_seq_signaled()
347 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) in radeon_fence_seq_signaled()
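
radeon_fence_seq_signaled() above is a classic double check: a cheap atomic read first, one poll of the hardware only if that fails, then a recheck. Sketched in user space, with poll_hardware() standing in for radeon_fence_process():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static atomic_uint_fast64_t last_seq;

    static void poll_hardware(void)
    {
        atomic_store(&last_seq, 7);  /* pretend the GPU reached seq 7 */
    }

    static bool seq_signaled(uint64_t seq)
    {
        if (atomic_load(&last_seq) >= seq)
            return true;             /* fast path: already observed */
        poll_hardware();             /* poll once, then recheck */
        return atomic_load(&last_seq) >= seq;
    }

    int main(void)
    {
        return seq_signaled(5) ? 0 : 1;
    }
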
357 unsigned int ring = fence->ring; in radeon_fence_is_signaled() local
360 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) in radeon_fence_is_signaled()
379 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) in radeon_fence_enable_signaling()
383 radeon_irq_kms_sw_irq_get(rdev, fence->ring); in radeon_fence_enable_signaling()
385 if (radeon_fence_activity(rdev, fence->ring)) in radeon_fence_enable_signaling()
389 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) { in radeon_fence_enable_signaling()
390 radeon_irq_kms_sw_irq_put(rdev, fence->ring); in radeon_fence_enable_signaling()
398 if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring)) in radeon_fence_enable_signaling()
399 rdev->fence_drv[fence->ring].delayed_irq = true; in radeon_fence_enable_signaling()
400 radeon_fence_schedule_check(rdev, fence->ring); in radeon_fence_enable_signaling()
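
The enable_signaling lines above show an "arm, then recheck" pattern: enabling the software interrupt before the final sequence check closes the race where the fence signals in between, and the matching put drops the reference again if it did. A hedged user-space sketch; irq_get/irq_put model the refcounted radeon_irq_kms_sw_irq_get/put pair:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint_fast64_t last_seq;
    static int irq_refcount;

    static void irq_get(void) { irq_refcount++; }
    static void irq_put(void) { irq_refcount--; }

    static bool enable_signaling(uint64_t seq)
    {
        if (atomic_load(&last_seq) >= seq)
            return false;            /* already signaled, nothing to arm */
        irq_get();                   /* arm the interrupt first ... */
        if (atomic_load(&last_seq) >= seq) {
            irq_put();               /* ... then recheck: it may have just fired */
            return false;
        }
        return true;                 /* stay armed; the irq handler wakes us */
    }

    int main(void)
    {
        atomic_store(&last_seq, 3);
        printf("armed: %d, irq refs: %d\n", enable_signaling(5), irq_refcount);
        return 0;
    }
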
424 if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { in radeon_fence_signaled()
461 * Wait for the requested sequence number(s) to be written by any ring
462 * (all asics). Sequence number array is indexed by ring id.
540 seq[fence->ring] = fence->seq; in radeon_fence_wait_timeout()
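
The wait helpers all funnel into a common routine that takes a per-ring array of target sequence numbers, where a zero entry means "do not wait on this ring". Waiting on a single fence, as radeon_fence_wait_timeout() does above, fills exactly one slot. A sketch of building such an array:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define NUM_RINGS 8

    int main(void)
    {
        uint64_t seq[NUM_RINGS];
        int fence_ring = 2;             /* illustrative fence */
        uint64_t fence_seq = 1234;

        memset(seq, 0, sizeof(seq));    /* 0 = ignore this ring */
        seq[fence_ring] = fence_seq;    /* wait only for this one */

        for (int i = 0; i < NUM_RINGS; i++)
            if (seq[i])
                printf("waiting on ring %d for seq %llu\n",
                       i, (unsigned long long)seq[i]);
        return 0;
    }
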
574 * @ring: ring index the fence is associated with
576 * Wait for the next fence on the requested ring to signal (all asics).
578 * Caller must hold ring lock.
580 int radeon_fence_wait_next(struct radeon_device *rdev, int ring) in radeon_fence_wait_next() argument
585 seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; in radeon_fence_wait_next()
586 if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) { in radeon_fence_wait_next()
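
radeon_fence_wait_next() builds its target as one past the last signaled number; if that already reaches the last emitted number, nothing is in flight and the function bails out early, as line 586 above shows. The arithmetic, modeled directly:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t last_signaled = 10;  /* models fence_drv[ring].last_seq */
        uint64_t last_emitted  = 13;  /* models fence_drv[ring].sync_seq[ring] */

        uint64_t target = last_signaled + 1;
        if (target >= last_emitted) {
            printf("nothing to wait for\n");  /* driver returns early here */
            return 0;
        }
        printf("wait for seq %llu\n", (unsigned long long)target);  /* 11 */
        return 0;
    }
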
604 * @ring: ring index the fence is associated with
606 * Wait for all fences on the requested ring to signal (all asics).
608 * Caller must hold ring lock.
610 int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) in radeon_fence_wait_empty() argument
615 seq[ring] = rdev->fence_drv[ring].sync_seq[ring]; in radeon_fence_wait_empty()
616 if (!seq[ring]) in radeon_fence_wait_empty()
624 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n", in radeon_fence_wait_empty()
625 ring, r); in radeon_fence_wait_empty()
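
radeon_fence_wait_empty() chooses its target differently: it waits for the last number ever emitted on the ring, and a zero sync_seq means the ring has emitted nothing, so there is nothing to drain. Sketched:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t last_emitted = 42;   /* models sync_seq[ring]; 0 = never used */

        if (!last_emitted) {
            printf("ring already empty\n");
            return 0;
        }
        printf("draining: wait until seq %llu signals\n",
               (unsigned long long)last_emitted);
        return 0;
    }
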
664 * @ring: ring index the fence is associated with
666 * Get the number of fences emitted on the requested ring (all asics).
667 * Returns the number of emitted fences on the ring. Used by the
668 * dynpm code to track ring activity. in radeon_fence_count_emitted()
670 unsigned int radeon_fence_count_emitted(struct radeon_device *rdev, int ring) in radeon_fence_count_emitted() argument
674 /* We are not protected by ring lock when reading the last sequence in radeon_fence_count_emitted()
677 radeon_fence_process(rdev, ring); in radeon_fence_count_emitted()
678 emitted = rdev->fence_drv[ring].sync_seq[ring] in radeon_fence_count_emitted()
679 - atomic64_read(&rdev->fence_drv[ring].last_seq); in radeon_fence_count_emitted()
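
The outstanding-fence count above is simply emitted minus signaled, taken after one poll of the hardware. The real function also clamps the difference to guard against transient wraparound artifacts; the exact bound used below is an assumption. A user-space model:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint_fast64_t last_seq;   /* last signaled */
    static uint64_t sync_seq;               /* last emitted */

    static unsigned count_emitted(void)
    {
        uint64_t emitted = sync_seq - atomic_load(&last_seq);

        if (emitted > 0x10000000)   /* assumed clamp, see lead-in */
            emitted = 0x10000000;
        return (unsigned)emitted;
    }

    int main(void)
    {
        sync_seq = 100;
        atomic_store(&last_seq, 97);
        printf("%u fences outstanding\n", count_emitted());  /* prints 3 */
        return 0;
    }
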
691 * @dst_ring: which ring to check against
693 * Check if the fence needs to be synced against another ring
695 * Returns true if we need to sync with another ring, false if
705 if (fence->ring == dst_ring) in radeon_fence_need_sync()
708 /* we are protected by the ring mutex */ in radeon_fence_need_sync()
710 if (fence->seq <= fdrv->sync_seq[fence->ring]) in radeon_fence_need_sync()
720 * @dst_ring: which ring to check against
723 * be synced with the requested ring (all asics).
733 if (fence->ring == dst_ring) in radeon_fence_note_sync()
736 /* we are protected by the ring mutex */ in radeon_fence_note_sync()
737 src = &fence->rdev->fence_drv[fence->ring]; in radeon_fence_note_sync()
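
The need_sync/note_sync pair above implements cross-ring synchronization tracking: each ring remembers, per other ring, the highest fence it has already synced to. A fence needs a semaphore wait only if it is newer than that mark, and after syncing, the source ring's marks are folded into the target's. A hedged user-space model of both halves:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_RINGS 4

    /* sync_seq[dst][src]: highest seq on ring src that ring dst synced to */
    static uint64_t sync_seq[NUM_RINGS][NUM_RINGS];

    static bool need_sync(int fence_ring, uint64_t fence_seq, int dst_ring)
    {
        if (fence_ring == dst_ring)
            return false;                    /* a ring orders itself */
        return fence_seq > sync_seq[dst_ring][fence_ring];
    }

    static void note_sync(int fence_ring, int dst_ring)
    {
        for (int i = 0; i < NUM_RINGS; i++) {
            if (i == dst_ring)
                continue;
            if (sync_seq[fence_ring][i] > sync_seq[dst_ring][i])
                sync_seq[dst_ring][i] = sync_seq[fence_ring][i];
        }
    }

    int main(void)
    {
        sync_seq[0][1] = 5;                  /* ring 0 synced up to seq 5 on ring 1 */
        printf("%d\n", need_sync(1, 7, 0));  /* 1: seq 7 is newer than the mark */
        printf("%d\n", need_sync(1, 4, 0));  /* 0: already covered */
        note_sync(1, 0);                     /* fold ring 1's marks into ring 0 */
        return 0;
    }
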
749 * ready for use on the requested ring.
752 * @ring: ring index to start the fence driver on
759 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) in radeon_fence_driver_start_ring() argument
764 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); in radeon_fence_driver_start_ring()
765 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) { in radeon_fence_driver_start_ring()
766 rdev->fence_drv[ring].scratch_reg = 0; in radeon_fence_driver_start_ring()
767 if (ring != R600_RING_TYPE_UVD_INDEX) { in radeon_fence_driver_start_ring()
768 index = R600_WB_EVENT_OFFSET + ring * 4; in radeon_fence_driver_start_ring()
769 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; in radeon_fence_driver_start_ring()
770 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + in radeon_fence_driver_start_ring()
776 rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; in radeon_fence_driver_start_ring()
777 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; in radeon_fence_driver_start_ring()
781 r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg); in radeon_fence_driver_start_ring()
787 rdev->fence_drv[ring].scratch_reg - in radeon_fence_driver_start_ring()
789 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; in radeon_fence_driver_start_ring()
790 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index; in radeon_fence_driver_start_ring()
792 radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring); in radeon_fence_driver_start_ring()
793 rdev->fence_drv[ring].initialized = true; in radeon_fence_driver_start_ring()
794 dev_info(rdev->dev, "fence driver on ring %d uses gpu addr 0x%016llx\n", in radeon_fence_driver_start_ring()
795 ring, rdev->fence_drv[ring].gpu_addr); in radeon_fence_driver_start_ring()
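
The start_ring lines above compute two views of the same fence slot from one byte offset into the writeback page: a CPU pointer (the wb[] array holds u32 entries, hence index/4) and a GPU address (base plus the byte offset). The scratch-register fallback at lines 781-790 derives an offset the same way from the allocated register. A sketch of the event-path math; WB_EVENT_OFFSET and the base address are illustrative values, not the real R600 constants:

    #include <stdint.h>
    #include <stdio.h>

    #define WB_EVENT_OFFSET 1024  /* assumed stand-in for R600_WB_EVENT_OFFSET */

    int main(void)
    {
        uint32_t wb[4096 / 4];             /* CPU mapping of the writeback page */
        uint64_t wb_gpu_addr = 0x100000;   /* GPU address of the same page */
        int ring = 3;

        unsigned index = WB_EVENT_OFFSET + ring * 4;  /* byte offset, 4 per ring */
        uint32_t *cpu_addr = &wb[index / 4];          /* CPU view (u32 units) */
        uint64_t gpu_addr  = wb_gpu_addr + index;     /* GPU view (bytes) */

        *cpu_addr = 0;  /* driver seeds the slot with the last seen seq */
        printf("ring %d fence slot: cpu %p, gpu 0x%llx\n",
               ring, (void *)cpu_addr, (unsigned long long)gpu_addr);
        return 0;
    }
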
801 * for the requested ring.
804 * @ring: ring index to start the fence driver on
806 * Init the fence driver for the requested ring (all asics).
809 static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring) in radeon_fence_driver_init_ring() argument
813 rdev->fence_drv[ring].scratch_reg = -1; in radeon_fence_driver_init_ring()
814 rdev->fence_drv[ring].cpu_addr = NULL; in radeon_fence_driver_init_ring()
815 rdev->fence_drv[ring].gpu_addr = 0; in radeon_fence_driver_init_ring()
817 rdev->fence_drv[ring].sync_seq[i] = 0; in radeon_fence_driver_init_ring()
818 atomic64_set(&rdev->fence_drv[ring].last_seq, 0); in radeon_fence_driver_init_ring()
819 rdev->fence_drv[ring].initialized = false; in radeon_fence_driver_init_ring()
820 INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work, in radeon_fence_driver_init_ring()
822 rdev->fence_drv[ring].rdev = rdev; in radeon_fence_driver_init_ring()
838 int ring; in radeon_fence_driver_init() local
841 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) in radeon_fence_driver_init()
842 radeon_fence_driver_init_ring(rdev, ring); in radeon_fence_driver_init()
857 int ring, r; in radeon_fence_driver_fini() local
860 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { in radeon_fence_driver_fini()
861 if (!rdev->fence_drv[ring].initialized) in radeon_fence_driver_fini()
863 r = radeon_fence_wait_empty(rdev, ring); in radeon_fence_driver_fini()
866 radeon_fence_driver_force_completion(rdev, ring); in radeon_fence_driver_fini()
868 cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work); in radeon_fence_driver_fini()
870 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); in radeon_fence_driver_fini()
871 rdev->fence_drv[ring].initialized = false; in radeon_fence_driver_fini()
880 * @ring: the ring to complete
885 void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring) in radeon_fence_driver_force_completion() argument
887 if (rdev->fence_drv[ring].initialized) { in radeon_fence_driver_force_completion()
888 radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring); in radeon_fence_driver_force_completion()
889 cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work); in radeon_fence_driver_force_completion()
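
Forced completion above works by writing the last *emitted* number into the fence slot, so every outstanding fence compares as signaled; the lockup worker is then canceled since there is nothing left to watch. A plain user-space model, with fence_slot standing in for the writeback word:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t fence_slot = 7;      /* hardware last wrote seq 7 */
        uint64_t last_emitted = 12;   /* models sync_seq[ring] */

        fence_slot = (uint32_t)last_emitted;  /* force: pretend GPU caught up */

        for (uint64_t seq = 8; seq <= last_emitted; seq++)
            printf("seq %llu signaled: %d\n", (unsigned long long)seq,
                   fence_slot >= (uint32_t)seq);
        return 0;
    }
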
909 seq_printf(m, "--- ring %d ---\n", i); in radeon_debugfs_fence_info_show()
917 seq_printf(m, "Last sync to ring %d 0x%016llx\n", in radeon_debugfs_fence_info_show()
969 switch (fence->ring) { in radeon_fence_get_timeline_name()