Lines matching full:mux in amdgpu_ring_mux.c, the amdgpu software-ring multiplexer. Each match shows its line number in the file and its enclosing function; a reconstructed sketch follows each group of matches.

43 static inline struct amdgpu_mux_entry *amdgpu_ring_mux_sw_entry(struct amdgpu_ring_mux *mux,  in amdgpu_ring_mux_sw_entry()  argument
46 return ring->entry_index < mux->ring_entry_size ? in amdgpu_ring_mux_sw_entry()
47 &mux->ring_entry[ring->entry_index] : NULL; in amdgpu_ring_mux_sw_entry()
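The matches above cover nearly the entire body of this lookup helper: it maps a software ring to its bookkeeping slot in the multiplexer, bounds-checking the ring's entry_index against ring_entry_size and returning NULL for an unregistered ring. A minimal reconstruction, assuming only the second parameter name, which the body implies (all sketches below assume the driver's own headers, e.g. amdgpu.h and amdgpu_ring_mux.h, are in scope):

static inline struct amdgpu_mux_entry *
amdgpu_ring_mux_sw_entry(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
        /* NULL when the sw ring was never registered with this mux */
        return ring->entry_index < mux->ring_entry_size ?
                        &mux->ring_entry[ring->entry_index] : NULL;
}
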
51 static void amdgpu_ring_mux_copy_pkt_from_sw_ring(struct amdgpu_ring_mux *mux, in amdgpu_ring_mux_copy_pkt_from_sw_ring() argument
56 struct amdgpu_ring *real_ring = mux->real_ring; in amdgpu_ring_mux_copy_pkt_from_sw_ring()
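Only the signature and the real_ring local match here. The helper copies a span of dwords from the software ring's buffer (ring->ring) into the real hardware ring, splitting the copy in two when the span wraps past the end of the buffer. A sketch of that shape; the start/end parameter names and the masked-index wrap test are assumptions, while amdgpu_ring_alloc() and amdgpu_ring_write_multiple() are existing amdgpu ring helpers:

static void amdgpu_ring_mux_copy_pkt_from_sw_ring(struct amdgpu_ring_mux *mux,
                                                  struct amdgpu_ring *ring,
                                                  u64 start, u64 end)
{
        struct amdgpu_ring *real_ring = mux->real_ring;
        u64 start_idx = start & ring->buf_mask;
        u64 end_idx = end & ring->buf_mask;

        if (start == end)
                return;         /* nothing to copy */

        if (start_idx > end_idx) {
                /* span wraps: copy the tail of the sw buffer, then its head */
                amdgpu_ring_alloc(real_ring,
                                  (ring->ring_size >> 2) + end_idx - start_idx);
                amdgpu_ring_write_multiple(real_ring,
                                           (void *)&ring->ring[start_idx],
                                           (ring->ring_size >> 2) - start_idx);
                amdgpu_ring_write_multiple(real_ring,
                                           (void *)&ring->ring[0], end_idx);
        } else {
                amdgpu_ring_alloc(real_ring, end_idx - start_idx);
                amdgpu_ring_write_multiple(real_ring,
                                           (void *)&ring->ring[start_idx],
                                           end_idx - start_idx);
        }
}
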
76 static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux) in amdgpu_mux_resubmit_chunks() argument
84 if (!mux->s_resubmit) in amdgpu_mux_resubmit_chunks()
87 for (i = 0; i < mux->num_ring_entries; i++) { in amdgpu_mux_resubmit_chunks()
88 if (mux->ring_entry[i].ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) { in amdgpu_mux_resubmit_chunks()
89 e = &mux->ring_entry[i]; in amdgpu_mux_resubmit_chunks()
100 seq = mux->seqno_to_resubmit; in amdgpu_mux_resubmit_chunks()
118 amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring, in amdgpu_mux_resubmit_chunks()
121 mux->wptr_resubmit = chunk->end; in amdgpu_mux_resubmit_chunks()
122 amdgpu_ring_commit(mux->real_ring); in amdgpu_mux_resubmit_chunks()
127 timer_delete(&mux->resubmit_timer); in amdgpu_mux_resubmit_chunks()
128 mux->s_resubmit = false; in amdgpu_mux_resubmit_chunks()
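After a mid-command-buffer preemption, work that a low-priority software ring had already handed to the hardware but whose fence never signaled has to be replayed. The matches give the skeleton: return unless s_resubmit is set, locate the first low-priority entry, replay every chunk whose fence falls in the window up to seqno_to_resubmit, advance wptr_resubmit past each replayed chunk, commit the real ring, then stop the fallback timer and clear the flag. A condensed sketch; the chunk list head (e->list) and the chunk fields (sync_seq, start, end) are assumptions, and the CE/DE/control-packet patching the full driver performs before each copy is omitted:

static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
{
        struct amdgpu_mux_entry *e = NULL;
        struct amdgpu_mux_chunk *chunk;
        u32 seq, last_seq;
        int i;

        if (!mux->s_resubmit)
                return;

        /* resubmission only concerns the low-priority (preempted) ring */
        for (i = 0; i < mux->num_ring_entries; i++) {
                if (mux->ring_entry[i].ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
                        e = &mux->ring_entry[i];
                        break;
                }
        }
        if (!e)
                return;

        last_seq = atomic_read(&e->ring->fence_drv.last_seq);
        seq = mux->seqno_to_resubmit;
        if (last_seq < seq) {
                /* replay every chunk whose fence lies in (last_seq, seq] */
                list_for_each_entry(chunk, &e->list, entry) {
                        if (chunk->sync_seq > last_seq && chunk->sync_seq <= seq) {
                                amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,
                                                                      chunk->start,
                                                                      chunk->end);
                                mux->wptr_resubmit = chunk->end;
                                amdgpu_ring_commit(mux->real_ring);
                        }
                }
        }

        timer_delete(&mux->resubmit_timer);
        mux->s_resubmit = false;
}
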
131 static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux) in amdgpu_ring_mux_schedule_resubmit() argument
133 mod_timer(&mux->resubmit_timer, jiffies + AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT); in amdgpu_ring_mux_schedule_resubmit()
138 struct amdgpu_ring_mux *mux = timer_container_of(mux, t, in amdgpu_mux_resubmit_fallback() local
141 if (!spin_trylock(&mux->lock)) { in amdgpu_mux_resubmit_fallback()
142 amdgpu_ring_mux_schedule_resubmit(mux); in amdgpu_mux_resubmit_fallback()
146 amdgpu_mux_resubmit_chunks(mux); in amdgpu_mux_resubmit_fallback()
147 spin_unlock(&mux->lock); in amdgpu_mux_resubmit_fallback()
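The resubmit fallback is a classic trylock-or-retry timer: if the callback cannot take mux->lock it re-arms itself rather than spinning in timer context, and the chunks get replayed on the next attempt, or sooner by whichever submission path takes the lock first. Reconstructed from the matches; timer_container_of() recovers the mux from its embedded timer_list, and the resubmit_timer field name is confirmed by the other matched lines:

static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux)
{
        mod_timer(&mux->resubmit_timer,
                  jiffies + AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT);
}

static void amdgpu_mux_resubmit_fallback(struct timer_list *t)
{
        struct amdgpu_ring_mux *mux = timer_container_of(mux, t, resubmit_timer);

        if (!spin_trylock(&mux->lock)) {
                /* lock held elsewhere: retry after another timeout */
                amdgpu_ring_mux_schedule_resubmit(mux);
                return;
        }
        amdgpu_mux_resubmit_chunks(mux);
        spin_unlock(&mux->lock);
}
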
150 int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, in amdgpu_ring_mux_init() argument
153 mux->real_ring = ring; in amdgpu_ring_mux_init()
154 mux->num_ring_entries = 0; in amdgpu_ring_mux_init()
156 mux->ring_entry = kcalloc(entry_size, sizeof(struct amdgpu_mux_entry), GFP_KERNEL); in amdgpu_ring_mux_init()
157 if (!mux->ring_entry) in amdgpu_ring_mux_init()
160 mux->ring_entry_size = entry_size; in amdgpu_ring_mux_init()
161 mux->s_resubmit = false; in amdgpu_ring_mux_init()
169 spin_lock_init(&mux->lock); in amdgpu_ring_mux_init()
170 timer_setup(&mux->resubmit_timer, amdgpu_mux_resubmit_fallback, 0); in amdgpu_ring_mux_init()
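Initialization wires the mux to its real ring, allocates the entry table with kcalloc() (failing with -ENOMEM, the usual pattern behind the visible NULL check), and sets up the spinlock plus the fallback timer. A sketch consistent with the matches; whatever sits in the unmatched gap (file lines 162-168, e.g. chunk-slab setup in the full driver) is omitted:

int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
                         unsigned int entry_size)
{
        mux->real_ring = ring;
        mux->num_ring_entries = 0;

        mux->ring_entry = kcalloc(entry_size, sizeof(struct amdgpu_mux_entry),
                                  GFP_KERNEL);
        if (!mux->ring_entry)
                return -ENOMEM;

        mux->ring_entry_size = entry_size;
        mux->s_resubmit = false;

        spin_lock_init(&mux->lock);
        timer_setup(&mux->resubmit_timer, amdgpu_mux_resubmit_fallback, 0);

        return 0;
}
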
175 void amdgpu_ring_mux_fini(struct amdgpu_ring_mux *mux) in amdgpu_ring_mux_fini() argument
181 for (i = 0; i < mux->num_ring_entries; i++) { in amdgpu_ring_mux_fini()
182 e = &mux->ring_entry[i]; in amdgpu_ring_mux_fini()
189 kfree(mux->ring_entry); in amdgpu_ring_mux_fini()
190 mux->ring_entry = NULL; in amdgpu_ring_mux_fini()
191 mux->num_ring_entries = 0; in amdgpu_ring_mux_fini()
192 mux->ring_entry_size = 0; in amdgpu_ring_mux_fini()
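Teardown mirrors init: walk the registered entries to release their per-entry state, then free the table and zero the bookkeeping so a stale mux cannot be reused. The per-entry loop body is not among the matches; the chunk cleanup shown here is an assumption based on the chunk lists used elsewhere in the file:

void amdgpu_ring_mux_fini(struct amdgpu_ring_mux *mux)
{
        struct amdgpu_mux_entry *e;
        struct amdgpu_mux_chunk *chunk, *tmp;
        int i;

        for (i = 0; i < mux->num_ring_entries; i++) {
                e = &mux->ring_entry[i];
                /* assumed: drop any chunks still queued on this entry */
                list_for_each_entry_safe(chunk, tmp, &e->list, entry) {
                        list_del(&chunk->entry);
                        kfree(chunk);   /* slab-cache free in the real driver */
                }
        }
        kfree(mux->ring_entry);
        mux->ring_entry = NULL;
        mux->num_ring_entries = 0;
        mux->ring_entry_size = 0;
}
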
195 int amdgpu_ring_mux_add_sw_ring(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) in amdgpu_ring_mux_add_sw_ring() argument
199 if (mux->num_ring_entries >= mux->ring_entry_size) { in amdgpu_ring_mux_add_sw_ring()
204 e = &mux->ring_entry[mux->num_ring_entries]; in amdgpu_ring_mux_add_sw_ring()
205 ring->entry_index = mux->num_ring_entries; in amdgpu_ring_mux_add_sw_ring()
209 mux->num_ring_entries += 1; in amdgpu_ring_mux_add_sw_ring()
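Registration is append-only: the next free slot becomes the ring's permanent entry_index, the same index amdgpu_ring_mux_sw_entry() bounds-checks later, and it fails once all ring_entry_size slots are taken. Sketch; the error code, the e->ring assignment, and the chunk-list init fill unmatched lines and are assumptions:

int amdgpu_ring_mux_add_sw_ring(struct amdgpu_ring_mux *mux,
                                struct amdgpu_ring *ring)
{
        struct amdgpu_mux_entry *e;

        if (mux->num_ring_entries >= mux->ring_entry_size) {
                DRM_ERROR("add sw ring exceeding max entry size\n");
                return -ENOENT;
        }

        e = &mux->ring_entry[mux->num_ring_entries];
        ring->entry_index = mux->num_ring_entries;
        e->ring = ring;

        INIT_LIST_HEAD(&e->list);       /* pending-chunk list for this ring */
        mux->num_ring_entries += 1;
        return 0;
}
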
213 void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr) in amdgpu_ring_mux_set_wptr() argument
217 spin_lock(&mux->lock); in amdgpu_ring_mux_set_wptr()
220 amdgpu_mux_resubmit_chunks(mux); in amdgpu_ring_mux_set_wptr()
222 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_set_wptr()
225 spin_unlock(&mux->lock); in amdgpu_ring_mux_set_wptr()
230 if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && mux->pending_trailing_fence_signaled) { in amdgpu_ring_mux_set_wptr()
231 spin_unlock(&mux->lock); in amdgpu_ring_mux_set_wptr()
237 if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit) in amdgpu_ring_mux_set_wptr()
238 e->sw_cptr = mux->wptr_resubmit; in amdgpu_ring_mux_set_wptr()
240 e->start_ptr_in_hw_ring = mux->real_ring->wptr; in amdgpu_ring_mux_set_wptr()
243 if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT || mux->wptr_resubmit < wptr) { in amdgpu_ring_mux_set_wptr()
244 amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr); in amdgpu_ring_mux_set_wptr()
245 e->end_ptr_in_hw_ring = mux->real_ring->wptr; in amdgpu_ring_mux_set_wptr()
246 amdgpu_ring_commit(mux->real_ring); in amdgpu_ring_mux_set_wptr()
248 e->end_ptr_in_hw_ring = mux->real_ring->wptr; in amdgpu_ring_mux_set_wptr()
250 spin_unlock(&mux->lock); in amdgpu_ring_mux_set_wptr()
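This is the heart of the mux. Under mux->lock it first drains any pending resubmission, then copies the newly written span of the software ring into the real ring and records where that span landed (start/end_ptr_in_hw_ring) so amdgpu_ring_mux_get_rptr() can translate back. Low-priority writes are suppressed while a preemption's trailing fence is still pending, and skip data already covered by the replay: sw_cptr is bumped to wptr_resubmit, and nothing is copied unless wptr has moved past it. A sketch stitched from the matches; the priority guard on the resubmit call, the sw_cptr/sw_wptr bookkeeping, and the error path fill unmatched lines and are assumptions:

void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux,
                              struct amdgpu_ring *ring, u64 wptr)
{
        struct amdgpu_mux_entry *e;

        spin_lock(&mux->lock);

        if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT)
                amdgpu_mux_resubmit_chunks(mux);

        e = amdgpu_ring_mux_sw_entry(mux, ring);
        if (!e) {
                spin_unlock(&mux->lock);
                return;
        }

        /* a preemption is in flight: the low-prio ring must stay off
         * the real ring until the trailing fence is handled */
        if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT &&
            mux->pending_trailing_fence_signaled) {
                spin_unlock(&mux->lock);
                return;
        }

        e->sw_cptr = e->sw_wptr;        /* copy resumes at the previous wptr */

        /* data up to wptr_resubmit was already replayed; skip past it */
        if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT &&
            e->sw_cptr < mux->wptr_resubmit)
                e->sw_cptr = mux->wptr_resubmit;

        e->sw_wptr = wptr;
        e->start_ptr_in_hw_ring = mux->real_ring->wptr;

        if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT ||
            mux->wptr_resubmit < wptr) {
                amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr);
                e->end_ptr_in_hw_ring = mux->real_ring->wptr;
                amdgpu_ring_commit(mux->real_ring);
        } else {
                /* nothing new beyond the resubmitted data */
                e->end_ptr_in_hw_ring = mux->real_ring->wptr;
        }
        spin_unlock(&mux->lock);
}
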
253 u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) in amdgpu_ring_mux_get_wptr() argument
257 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_get_wptr()
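The wptr accessor is trivial by comparison: it returns the software-side write pointer cached on the entry (sw_wptr, a field name assumed from the set_wptr() flow), with 0 as the fallback for an unregistered ring:

u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
        struct amdgpu_mux_entry *e = amdgpu_ring_mux_sw_entry(mux, ring);

        return e ? e->sw_wptr : 0;
}
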
268 * @mux: the multiplexer the software rings attach to
282 u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) in amdgpu_ring_mux_get_rptr() argument
287 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_get_rptr()
293 readp = amdgpu_ring_get_rptr(mux->real_ring); in amdgpu_ring_mux_get_rptr()
295 start = e->start_ptr_in_hw_ring & mux->real_ring->buf_mask; in amdgpu_ring_mux_get_rptr()
296 end = e->end_ptr_in_hw_ring & mux->real_ring->buf_mask; in amdgpu_ring_mux_get_rptr()
299 readp += mux->real_ring->ring_size >> 2; in amdgpu_ring_mux_get_rptr()
300 end += mux->real_ring->ring_size >> 2; in amdgpu_ring_mux_get_rptr()
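The rptr translation inverts what set_wptr() recorded. It reads the real ring's rptr, masks the saved start/end of this ring's last copied span, and unwraps by one full ring of dwords (the two ring_size >> 2 additions matched above) when the span crosses the buffer end, so the comparison is linear. The hardware rptr is then mapped back: inside the span, its offset is added to the software read base; before the span, nothing of this ring has been consumed; after it, everything has. A sketch; sw_rptr, sw_cptr and sw_wptr are field names assumed from the surrounding functions:

u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
        struct amdgpu_mux_entry *e;
        u64 readp, offset, start, end;

        e = amdgpu_ring_mux_sw_entry(mux, ring);
        if (!e)
                return 0;

        readp = amdgpu_ring_get_rptr(mux->real_ring);

        start = e->start_ptr_in_hw_ring & mux->real_ring->buf_mask;
        end = e->end_ptr_in_hw_ring & mux->real_ring->buf_mask;
        if (start > end) {
                /* the span wraps: unwrap by one full ring (in dwords) */
                if (readp <= end)
                        readp += mux->real_ring->ring_size >> 2;
                end += mux->real_ring->ring_size >> 2;
        }

        if (start <= readp && readp <= end) {
                offset = readp - start;         /* hw progress inside the span */
                e->sw_rptr = (e->sw_cptr + offset) & ring->buf_mask;
        } else if (readp < start) {
                e->sw_rptr = e->sw_cptr;        /* span not reached yet */
        } else {
                e->sw_rptr = e->sw_wptr;        /* span fully consumed */
        }

        return e->sw_rptr;
}
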
319 struct amdgpu_ring_mux *mux = &adev->gfx.muxer; in amdgpu_sw_ring_get_rptr_gfx() local
322 return amdgpu_ring_mux_get_rptr(mux, ring); in amdgpu_sw_ring_get_rptr_gfx()
328 struct amdgpu_ring_mux *mux = &adev->gfx.muxer; in amdgpu_sw_ring_get_wptr_gfx() local
331 return amdgpu_ring_mux_get_wptr(mux, ring); in amdgpu_sw_ring_get_wptr_gfx()
337 struct amdgpu_ring_mux *mux = &adev->gfx.muxer; in amdgpu_sw_ring_set_wptr_gfx() local
340 amdgpu_ring_mux_set_wptr(mux, ring, ring->wptr); in amdgpu_sw_ring_set_wptr_gfx()
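These three wrappers adapt the mux to the amdgpu_ring_funcs callback signatures so a software gfx ring behaves like any other ring: each recovers the per-device mux from adev->gfx.muxer and delegates, with set_wptr passing the ring's own wptr through. The get_rptr wrapper, reconstructed; the WARN_ON sanity check on ring->is_sw_ring fills an unmatched line and is an assumption:

u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ring_mux *mux = &adev->gfx.muxer;

        WARN_ON(!ring->is_sw_ring);
        return amdgpu_ring_mux_get_rptr(mux, ring);
}
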
362 static int amdgpu_mcbp_scan(struct amdgpu_ring_mux *mux) in amdgpu_mcbp_scan() argument
368 for (i = 0; i < mux->num_ring_entries; i++) { in amdgpu_mcbp_scan()
369 ring = mux->ring_entry[i].ring; in amdgpu_mcbp_scan()
378 return need_preempt && !mux->s_resubmit; in amdgpu_mcbp_scan()
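The scan decides whether a mid-command-buffer preemption (MCBP) is worth triggering: it refuses if the high-priority ring already has emitted fences in flight (the hardware is already serving it), requests one if a low-priority ring has left a fence unsignaled for too long, and always refuses while a resubmission is pending (the !mux->s_resubmit term above). A sketch; the two loop-body conditions fill unmatched lines, so the fence-age helpers and the threshold constant are assumptions based on the amdgpu fence code:

static int amdgpu_mcbp_scan(struct amdgpu_ring_mux *mux)
{
        struct amdgpu_ring *ring;
        int i, need_preempt = 0;

        for (i = 0; i < mux->num_ring_entries; i++) {
                ring = mux->ring_entry[i].ring;
                if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT &&
                    amdgpu_fence_count_emitted(ring) > 0)
                        return 0;       /* high-prio work already in flight */
                if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT &&
                    amdgpu_fence_last_unsignaled_time_us(ring) >
                    AMDGPU_MAX_LAST_UNSIGNALED_TIME_US)
                        need_preempt = 1;       /* low-prio work stuck too long */
        }
        return need_preempt && !mux->s_resubmit;
}
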
382 static int amdgpu_mcbp_trigger_preempt(struct amdgpu_ring_mux *mux) in amdgpu_mcbp_trigger_preempt() argument
386 spin_lock(&mux->lock); in amdgpu_mcbp_trigger_preempt()
387 mux->pending_trailing_fence_signaled = true; in amdgpu_mcbp_trigger_preempt()
388 r = amdgpu_ring_preempt_ib(mux->real_ring); in amdgpu_mcbp_trigger_preempt()
389 spin_unlock(&mux->lock); in amdgpu_mcbp_trigger_preempt()
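The trigger is small but ordering-sensitive: pending_trailing_fence_signaled is raised under mux->lock before amdgpu_ring_preempt_ib() is called on the real ring, so by the time the preemption's trailing-fence interrupt fires, amdgpu_mcbp_handle_trailing_fence_irq() (below) knows it is expected. Reconstructed from the matches:

static int amdgpu_mcbp_trigger_preempt(struct amdgpu_ring_mux *mux)
{
        int r;

        spin_lock(&mux->lock);
        mux->pending_trailing_fence_signaled = true;
        r = amdgpu_ring_preempt_ib(mux->real_ring);
        spin_unlock(&mux->lock);
        return r;
}
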
396 struct amdgpu_ring_mux *mux = &adev->gfx.muxer; in amdgpu_sw_ring_ib_begin() local
400 if (amdgpu_mcbp_scan(mux) > 0) in amdgpu_sw_ring_ib_begin()
401 amdgpu_mcbp_trigger_preempt(mux); in amdgpu_sw_ring_ib_begin()
405 amdgpu_ring_mux_start_ib(mux, ring); in amdgpu_sw_ring_ib_begin()
411 struct amdgpu_ring_mux *mux = &adev->gfx.muxer; in amdgpu_sw_ring_ib_end() local
416 amdgpu_ring_mux_end_ib(mux, ring); in amdgpu_sw_ring_ib_end()
422 struct amdgpu_ring_mux *mux = &adev->gfx.muxer; in amdgpu_sw_ring_ib_mark_offset() local
430 amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type); in amdgpu_sw_ring_ib_mark_offset()
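The IB-lifecycle wrappers bracket each submission on a software ring. ib_begin is where preemption actually starts: a high-priority submission scans the mux and triggers MCBP when warranted, while a low-priority one opens a tracking chunk via amdgpu_ring_mux_start_ib(); ib_end and ib_mark_offset delegate likewise to close the chunk and record intra-IB offsets. A sketch of the begin wrapper; the priority branch reconstructs the unmatched lines around the visible calls:

void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ring_mux *mux = &adev->gfx.muxer;

        WARN_ON(!ring->is_sw_ring);
        if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
                /* high-prio submission: preempt low-prio work if needed */
                if (amdgpu_mcbp_scan(mux) > 0)
                        amdgpu_mcbp_trigger_preempt(mux);
                return;
        }

        amdgpu_ring_mux_start_ib(mux, ring);
}
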
433 void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) in amdgpu_ring_mux_start_ib() argument
438 spin_lock(&mux->lock); in amdgpu_ring_mux_start_ib()
439 amdgpu_mux_resubmit_chunks(mux); in amdgpu_ring_mux_start_ib()
440 spin_unlock(&mux->lock); in amdgpu_ring_mux_start_ib()
442 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_start_ib()
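start_ib first drains any pending resubmission under the lock, so the new chunk's position is recorded against an up-to-date real ring, then allocates a chunk describing the upcoming IB: its start is the software ring's current wptr, and it is queued on the entry's pending list until amdgpu_ring_mux_end_ib() closes it. A sketch with assumed chunk fields; the real driver allocates from a dedicated slab cache and pre-invalidates the CE/DE/control offsets:

void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
        struct amdgpu_mux_entry *e;
        struct amdgpu_mux_chunk *chunk;

        spin_lock(&mux->lock);
        amdgpu_mux_resubmit_chunks(mux);
        spin_unlock(&mux->lock);

        e = amdgpu_ring_mux_sw_entry(mux, ring);
        if (!e)
                return;

        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (!chunk)
                return;

        chunk->start = ring->wptr;      /* chunk covers [start, end) on the sw ring */
        list_add_tail(&chunk->entry, &e->list);
}
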
462 static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) in scan_and_remove_signaled_chunk() argument
468 e = amdgpu_ring_mux_sw_entry(mux, ring); in scan_and_remove_signaled_chunk()
484 void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux, in amdgpu_ring_mux_ib_mark_offset() argument
491 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_ib_mark_offset()
519 void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) in amdgpu_ring_mux_end_ib() argument
524 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_end_ib()
539 scan_and_remove_signaled_chunk(mux, ring); in amdgpu_ring_mux_end_ib()
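end_ib closes the chunk opened by start_ib, stamping the software ring's wptr as its end and the fence seqno the chunk will signal; scan_and_remove_signaled_chunk() then garbage-collects: any chunk whose sync_seq is at or below the ring's last signaled fence can never need resubmission and is freed. A combined sketch under those assumed field names:

void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
        struct amdgpu_mux_entry *e = amdgpu_ring_mux_sw_entry(mux, ring);
        struct amdgpu_mux_chunk *chunk, *tmp;
        u32 last_seq;

        if (!e || list_empty(&e->list))
                return;

        /* close the chunk opened by amdgpu_ring_mux_start_ib() */
        chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);
        chunk->end = ring->wptr;
        chunk->sync_seq = READ_ONCE(ring->fence_drv.sync_seq);

        /* scan_and_remove_signaled_chunk(): drop chunks that already signaled */
        last_seq = atomic_read(&ring->fence_drv.last_seq);
        list_for_each_entry_safe(chunk, tmp, &e->list, entry) {
                if (chunk->sync_seq <= last_seq) {
                        list_del(&chunk->entry);
                        kfree(chunk);
                }
        }
}
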
542 bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux) in amdgpu_mcbp_handle_trailing_fence_irq() argument
548 if (!mux->pending_trailing_fence_signaled) in amdgpu_mcbp_handle_trailing_fence_irq()
551 if (mux->real_ring->trail_seq != le32_to_cpu(*mux->real_ring->trail_fence_cpu_addr)) in amdgpu_mcbp_handle_trailing_fence_irq()
554 for (i = 0; i < mux->num_ring_entries; i++) { in amdgpu_mcbp_handle_trailing_fence_irq()
555 e = &mux->ring_entry[i]; in amdgpu_mcbp_handle_trailing_fence_irq()
569 mux->s_resubmit = true; in amdgpu_mcbp_handle_trailing_fence_irq()
570 mux->seqno_to_resubmit = ring->fence_drv.sync_seq; in amdgpu_mcbp_handle_trailing_fence_irq()
571 amdgpu_ring_mux_schedule_resubmit(mux); in amdgpu_mcbp_handle_trailing_fence_irq()
574 mux->pending_trailing_fence_signaled = false; in amdgpu_mcbp_handle_trailing_fence_irq()
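This is the interrupt half of MCBP. It bails out unless a preemption is expected (pending_trailing_fence_signaled), then confirms the trailing fence by comparing the CPU-visible fence value against trail_seq. On a genuine preemption it finds the preempted low-priority ring, processes its fences, and if unsignaled work remains schedules the replay: s_resubmit is set, seqno_to_resubmit captures the last emitted fence, and the fallback timer is armed in case no new submission arrives to drive amdgpu_mux_resubmit_chunks(). A sketch; the bool return and the fence-helper calls fill unmatched lines and are assumptions:

bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux)
{
        struct amdgpu_mux_entry *e;
        struct amdgpu_ring *ring = NULL;
        int i;

        if (!mux->pending_trailing_fence_signaled)
                return false;

        /* not our trailing fence yet */
        if (mux->real_ring->trail_seq !=
            le32_to_cpu(*mux->real_ring->trail_fence_cpu_addr))
                return false;

        for (i = 0; i < mux->num_ring_entries; i++) {
                e = &mux->ring_entry[i];
                if (e->ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
                        ring = e->ring;
                        break;
                }
        }
        if (!ring) {
                mux->pending_trailing_fence_signaled = false;
                return false;
        }

        amdgpu_fence_process(ring);
        if (amdgpu_fence_count_emitted(ring) > 0) {
                /* unsignaled work was preempted: schedule its replay */
                mux->s_resubmit = true;
                mux->seqno_to_resubmit = ring->fence_drv.sync_seq;
                amdgpu_ring_mux_schedule_resubmit(mux);
        }

        mux->pending_trailing_fence_signaled = false;
        return true;
}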