Cross-reference listing: references to the local variable e (a struct amdgpu_mux_entry pointer) in drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c. Each match gives the source line number, the matching code, and the enclosing function; "local" marks a declaration site.

78 struct amdgpu_mux_entry *e = NULL; in amdgpu_mux_resubmit_chunks() local
89 e = &mux->ring_entry[i]; in amdgpu_mux_resubmit_chunks()
94 if (!e) { in amdgpu_mux_resubmit_chunks()
99 last_seq = atomic_read(&e->ring->fence_drv.last_seq); in amdgpu_mux_resubmit_chunks()
103 list_for_each_entry(chunk, &e->list, entry) { in amdgpu_mux_resubmit_chunks()
105 amdgpu_fence_update_start_timestamp(e->ring, in amdgpu_mux_resubmit_chunks()
109 le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) { in amdgpu_mux_resubmit_chunks()
110 if (chunk->cntl_offset <= e->ring->buf_mask) in amdgpu_mux_resubmit_chunks()
111 amdgpu_ring_patch_cntl(e->ring, in amdgpu_mux_resubmit_chunks()
113 if (chunk->ce_offset <= e->ring->buf_mask) in amdgpu_mux_resubmit_chunks()
114 amdgpu_ring_patch_ce(e->ring, chunk->ce_offset); in amdgpu_mux_resubmit_chunks()
115 if (chunk->de_offset <= e->ring->buf_mask) in amdgpu_mux_resubmit_chunks()
116 amdgpu_ring_patch_de(e->ring, chunk->de_offset); in amdgpu_mux_resubmit_chunks()
118 amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring, in amdgpu_mux_resubmit_chunks()
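
Taken together, the matches above trace the post-preemption resubmit path: amdgpu_mux_resubmit_chunks() scans mux->ring_entry[] for the low-priority ring's entry, reads the last signaled fence sequence, refreshes each pending chunk's fence start timestamp, re-patches the control/CE/DE packet offsets when they still fall inside the ring buffer, and copies the chunk's packets back into the real ring. Below is an abbreviated sketch of that replay loop; the toy_ types and helpers are stand-ins rather than the driver's definitions, and a kernel build context is assumed for <linux/list.h>.

/* Abbreviated sketch of the chunk-replay loop in
 * amdgpu_mux_resubmit_chunks(); "toy_" types and helpers are stand-ins,
 * only the control flow mirrors the listing above. */
#include <linux/list.h>
#include <linux/types.h>

struct toy_ring { u64 buf_mask; };

struct toy_chunk {
	struct list_head entry;
	u64 start, end;			/* sw-ring span of this chunk */
	u64 cntl_offset, ce_offset, de_offset;
	u32 sync_seq;			/* fence seq the chunk signals */
};

struct toy_entry {
	struct toy_ring *ring;
	struct list_head list;		/* pending chunks on this ring */
};

static void toy_patch(struct toy_ring *ring, u64 offset) { /* stub */ }
static void toy_copy_pkt(struct toy_ring *ring, u64 start, u64 end) { /* stub */ }

/* Replay every chunk whose fence lies in (last_seq, seq]. */
static void toy_resubmit(struct toy_entry *e, u32 last_seq, u32 seq)
{
	struct toy_chunk *chunk;

	list_for_each_entry(chunk, &e->list, entry) {
		if (chunk->sync_seq <= last_seq || chunk->sync_seq > seq)
			continue;

		/* Offsets were seeded to buf_mask + 1 ("unset") at
		 * start_ib time, so <= buf_mask means "was recorded".
		 * The driver also checks chunk->sync_seq against the
		 * fence word at fence_drv.cpu_addr + 2 first; omitted. */
		if (chunk->cntl_offset <= e->ring->buf_mask)
			toy_patch(e->ring, chunk->cntl_offset);
		if (chunk->ce_offset <= e->ring->buf_mask)
			toy_patch(e->ring, chunk->ce_offset);
		if (chunk->de_offset <= e->ring->buf_mask)
			toy_patch(e->ring, chunk->de_offset);

		/* Copy the chunk's packets back into the real ring. */
		toy_copy_pkt(e->ring, chunk->start, chunk->end);
	}
}
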
177 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_fini() local
182 e = &mux->ring_entry[i]; in amdgpu_ring_mux_fini()
183 list_for_each_entry_safe(chunk, chunk2, &e->list, entry) { in amdgpu_ring_mux_fini()
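
amdgpu_ring_mux_fini() tears each entry's chunk list down with list_for_each_entry_safe(), whose second cursor (chunk2) caches the next node so the current one can be unlinked and freed without breaking the iteration. A minimal sketch of that idiom, assuming kernel context (the driver frees to its chunk kmem_cache; kfree stands in here):

/* Sketch of the list teardown in amdgpu_ring_mux_fini(); the _safe
 * iterator lets us free the current node mid-loop. */
#include <linux/list.h>
#include <linux/slab.h>

struct toy_chunk {
	struct list_head entry;
};

static void toy_fini(struct list_head *chunks)
{
	struct toy_chunk *chunk, *chunk2;

	list_for_each_entry_safe(chunk, chunk2, chunks, entry) {
		list_del(&chunk->entry);	/* unlink first */
		kfree(chunk);			/* then release */
	}
}
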
197 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_add_sw_ring() local
204 e = &mux->ring_entry[mux->num_ring_entries]; in amdgpu_ring_mux_add_sw_ring()
206 e->ring = ring; in amdgpu_ring_mux_add_sw_ring()
208 INIT_LIST_HEAD(&e->list); in amdgpu_ring_mux_add_sw_ring()
215 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_set_wptr() local
222 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_set_wptr()
223 if (!e) { in amdgpu_ring_mux_set_wptr()
235 e->sw_cptr = e->sw_wptr; in amdgpu_ring_mux_set_wptr()
237 if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit) in amdgpu_ring_mux_set_wptr()
238 e->sw_cptr = mux->wptr_resubmit; in amdgpu_ring_mux_set_wptr()
239 e->sw_wptr = wptr; in amdgpu_ring_mux_set_wptr()
240 e->start_ptr_in_hw_ring = mux->real_ring->wptr; in amdgpu_ring_mux_set_wptr()
244 amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr); in amdgpu_ring_mux_set_wptr()
245 e->end_ptr_in_hw_ring = mux->real_ring->wptr; in amdgpu_ring_mux_set_wptr()
248 e->end_ptr_in_hw_ring = mux->real_ring->wptr; in amdgpu_ring_mux_set_wptr()
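
The amdgpu_ring_mux_set_wptr() matches show the bookkeeping done on every software-ring submission: sw_cptr marks where copying resumes (bumped past mux->wptr_resubmit when a low-priority ring's packets were already replayed), sw_wptr takes the new write pointer, and start/end_ptr_in_hw_ring record where the copied span landed in the real ring so the read pointer can later be translated back. A condensed sketch, with the locking and the pending-preemption early-out omitted; the toy_ names are stand-ins:

/* Condensed sketch of the pointer bookkeeping in
 * amdgpu_ring_mux_set_wptr(); locking omitted, "toy_" names are
 * stand-ins for the driver's structures. */
#include <linux/types.h>

#define TOY_PRIO_DEFAULT 0	/* stands in for AMDGPU_RING_PRIO_DEFAULT */

struct toy_entry {
	u64 sw_cptr, sw_wptr;	/* copy-resume / write pointers */
	u64 start_ptr_in_hw_ring, end_ptr_in_hw_ring;
};

struct toy_mux {
	u64 wptr_resubmit;	/* high-water mark of replayed packets */
	u64 real_ring_wptr;	/* stands in for real_ring->wptr */
};

static void toy_copy(struct toy_mux *mux, u64 from, u64 to)
{
	mux->real_ring_wptr += to - from;	/* stub: pretend to copy */
}

static void toy_set_wptr(struct toy_mux *mux, struct toy_entry *e,
			 int hw_prio, u64 wptr)
{
	e->sw_cptr = e->sw_wptr;
	/* Skip packets the resubmit path already copied across. */
	if (hw_prio <= TOY_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit)
		e->sw_cptr = mux->wptr_resubmit;
	e->sw_wptr = wptr;

	/* Record where this span lands in the real (hw) ring. */
	e->start_ptr_in_hw_ring = mux->real_ring_wptr;
	if (hw_prio > TOY_PRIO_DEFAULT || mux->wptr_resubmit < wptr)
		toy_copy(mux, e->sw_cptr, wptr);
	e->end_ptr_in_hw_ring = mux->real_ring_wptr;
}
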
255 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_get_wptr() local
257 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_get_wptr()
258 if (!e) { in amdgpu_ring_mux_get_wptr()
263 return e->sw_wptr; in amdgpu_ring_mux_get_wptr()
284 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_get_rptr() local
287 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_get_rptr()
288 if (!e) { in amdgpu_ring_mux_get_rptr()
295 start = e->start_ptr_in_hw_ring & mux->real_ring->buf_mask; in amdgpu_ring_mux_get_rptr()
296 end = e->end_ptr_in_hw_ring & mux->real_ring->buf_mask; in amdgpu_ring_mux_get_rptr()
305 e->sw_rptr = (e->sw_cptr + offset) & ring->buf_mask; in amdgpu_ring_mux_get_rptr()
307 e->sw_rptr = e->sw_cptr; in amdgpu_ring_mux_get_rptr()
310 e->sw_rptr = e->sw_wptr; in amdgpu_ring_mux_get_rptr()
313 return e->sw_rptr; in amdgpu_ring_mux_get_rptr()
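
amdgpu_ring_mux_get_rptr() inverts that bookkeeping: it masks the recorded start/end marks into the real ring's index space and classifies the hardware read pointer as before, inside, or past the copied span, yielding sw_cptr, sw_cptr plus the consumed offset, or sw_wptr respectively. The following is a runnable userspace illustration of the arithmetic; all values are made-up examples, and the driver's extra handling for a span that wraps past the ring end (start > end) is left out.

/* Userspace illustration of the rptr translation in
 * amdgpu_ring_mux_get_rptr(); example values, wraparound case omitted. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t hw_mask = 0xff;	/* example real-ring mask (256 slots) */
	uint64_t sw_mask = 0x3f;	/* example sw-ring mask (64 slots) */

	/* Recorded when the sw ring's packets were copied across. */
	uint64_t start = 300 & hw_mask;	/* start_ptr_in_hw_ring */
	uint64_t end   = 340 & hw_mask;	/* end_ptr_in_hw_ring */
	uint64_t sw_cptr = 10, sw_wptr = 50;

	uint64_t readp = 320 & hw_mask;	/* hw read pointer */
	uint64_t sw_rptr;

	if (readp >= start && readp <= end) {
		/* Mid-span: advance by the consumed distance, wrapping
		 * with the sw ring's own mask. */
		sw_rptr = (sw_cptr + (readp - start)) & sw_mask;
	} else if (readp < start) {
		sw_rptr = sw_cptr;	/* span not reached yet */
	} else {
		sw_rptr = sw_wptr;	/* span fully consumed */
	}

	printf("sw_rptr = %" PRIu64 "\n", sw_rptr);	/* prints 30 */
	return 0;
}
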
435 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_start_ib() local
442 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_start_ib()
443 if (!e) { in amdgpu_ring_mux_start_ib()
459 list_add_tail(&chunk->entry, &e->list); in amdgpu_ring_mux_start_ib()
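
amdgpu_ring_mux_start_ib() opens a new chunk for the IB being submitted: the sw ring's current wptr becomes the chunk's start, and the cntl/ce/de offsets are seeded with buf_mask + 1, a deliberately out-of-range value, so the resubmit path's offset <= buf_mask tests can tell whether the submission ever recorded them. A sketch of that initialization, assuming kernel context (the driver allocates from a dedicated kmem_cache; kmalloc stands in):

/* Sketch of chunk creation in amdgpu_ring_mux_start_ib(); kmalloc
 * stands in for the driver's chunk slab cache. */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct toy_chunk {
	struct list_head entry;
	u64 start;
	u64 cntl_offset, ce_offset, de_offset;
};

static int toy_start_ib(struct list_head *chunks, u64 wptr, u64 buf_mask)
{
	struct toy_chunk *chunk = kmalloc(sizeof(*chunk), GFP_KERNEL);

	if (!chunk)
		return -ENOMEM;

	chunk->start = wptr;
	/* buf_mask + 1 is out of range: "not set by this submission". */
	chunk->cntl_offset = buf_mask + 1;
	chunk->ce_offset = buf_mask + 1;
	chunk->de_offset = buf_mask + 1;

	list_add_tail(&chunk->entry, chunks);
	return 0;
}
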
465 struct amdgpu_mux_entry *e; in scan_and_remove_signaled_chunk() local
468 e = amdgpu_ring_mux_sw_entry(mux, ring); in scan_and_remove_signaled_chunk()
469 if (!e) { in scan_and_remove_signaled_chunk()
476 list_for_each_entry_safe(chunk, tmp, &e->list, entry) { in scan_and_remove_signaled_chunk()
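
scan_and_remove_signaled_chunk() is the list's garbage collector: once the ring's fence driver reports a last-signaled sequence number, every chunk at or below it can never need replaying again, so it is unlinked and freed, again through the _safe iterator. A short sketch under the same stand-in assumptions:

/* Sketch of scan_and_remove_signaled_chunk(): drop chunks whose fence
 * already signaled. kfree stands in for the driver's slab cache. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct toy_chunk {
	struct list_head entry;
	u32 sync_seq;
};

static void toy_scan_and_remove(struct list_head *chunks, u32 last_seq)
{
	struct toy_chunk *chunk, *tmp;

	list_for_each_entry_safe(chunk, tmp, chunks, entry) {
		if (chunk->sync_seq <= last_seq) {
			list_del(&chunk->entry);
			kfree(chunk);
		}
	}
}
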
488 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_ib_mark_offset() local
491 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_ib_mark_offset()
492 if (!e) { in amdgpu_ring_mux_ib_mark_offset()
497 chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry); in amdgpu_ring_mux_ib_mark_offset()
521 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_end_ib() local
524 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_end_ib()
525 if (!e) { in amdgpu_ring_mux_end_ib()
530 chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry); in amdgpu_ring_mux_end_ib()
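
Both amdgpu_ring_mux_ib_mark_offset() and amdgpu_ring_mux_end_ib() address the chunk most recently opened by start_ib via list_last_entry(): the former stamps a cntl/ce/de offset into it, the latter closes it by recording the end wptr and the fence sequence it will signal with. A sketch of the close path (stand-in types, kernel context):

/* Sketch of amdgpu_ring_mux_end_ib(): close the newest chunk, i.e. the
 * one start_ib appended last. Stand-in types, error paths omitted. */
#include <linux/list.h>
#include <linux/types.h>

struct toy_chunk {
	struct list_head entry;
	u64 start, end;
	u32 sync_seq;
};

static void toy_end_ib(struct list_head *chunks, u64 wptr, u32 sync_seq)
{
	struct toy_chunk *chunk;

	if (list_empty(chunks))
		return;			/* no open chunk to close */

	chunk = list_last_entry(chunks, struct toy_chunk, entry);
	chunk->end = wptr;		/* span now runs [start, end) */
	chunk->sync_seq = sync_seq;	/* fence seq this IB signals */
}
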
544 struct amdgpu_mux_entry *e; in amdgpu_mcbp_handle_trailing_fence_irq() local
555 e = &mux->ring_entry[i]; in amdgpu_mcbp_handle_trailing_fence_irq()
556 if (e->ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) { in amdgpu_mcbp_handle_trailing_fence_irq()
557 ring = e->ring; in amdgpu_mcbp_handle_trailing_fence_irq()
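
Finally, amdgpu_mcbp_handle_trailing_fence_irq() repeats the entry scan seen at the top of the resubmit path (lines 78-94 above): it walks mux->ring_entry[] for the first ring at or below AMDGPU_RING_PRIO_DEFAULT, i.e. the preemptible low-priority software ring whose trailing fence just fired. A sketch of that selection loop (stand-in types; the handler's preceding trailing-fence sequence check is omitted):

/* Sketch of the low-priority-ring scan shared by
 * amdgpu_mux_resubmit_chunks() and the trailing-fence IRQ handler. */
#include <stddef.h>

#define TOY_PRIO_DEFAULT 0	/* stands in for AMDGPU_RING_PRIO_DEFAULT */

struct toy_ring { int hw_prio; };

struct toy_mux {
	struct toy_ring *rings[2];	/* abbreviates entry[i]->ring */
	int num_ring_entries;
};

static struct toy_ring *toy_find_low_prio(struct toy_mux *mux)
{
	int i;

	for (i = 0; i < mux->num_ring_entries; i++) {
		/* First ring at or below the default priority wins. */
		if (mux->rings[i]->hw_prio <= TOY_PRIO_DEFAULT)
			return mux->rings[i];
	}
	return NULL;			/* no low-priority ring found */
}
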