Lines Matching full:ring

In amdgpu_ring_mux_sw_entry():
 44					struct amdgpu_ring *ring)
 46		return ring->entry_index < mux->ring_entry_size ?
 47				&mux->ring_entry[ring->entry_index] : NULL;

In amdgpu_ring_mux_copy_pkt_from_sw_ring():
 50	/* copy packets from the sw ring range [begin, end) */
 52					struct amdgpu_ring *ring,
 58		start = s_start & ring->buf_mask;
 59		end = s_end & ring->buf_mask;
 62		DRM_ERROR("no more data copied from sw ring\n");
 66		amdgpu_ring_alloc(real_ring, (ring->ring_size >> 2) + end - start);
 67		amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start],
 68					   (ring->ring_size >> 2) - start);
 69		amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[0], end);
 72		amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start], end - start);

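The masked start/end and the split write on lines 67-69 are the classic power-of-two ring-buffer copy: ring_size >> 2 converts the byte size to dwords, and a span that wraps past the end of the buffer is copied in two pieces. A minimal stand-alone sketch of the same pattern, using simplified stand-in types rather than the real amdgpu structs:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in, not the amdgpu ring structure. */
    struct sketch_ring {
            uint32_t *buf;      /* ring contents, in dwords */
            uint64_t size_dw;   /* number of dwords, power of two */
            uint64_t buf_mask;  /* size_dw - 1 */
    };

    /* Copy the dwords in [s_start, s_end) out of the ring, handling wrap. */
    static uint64_t copy_ring_span(uint32_t *out, const struct sketch_ring *r,
                                   uint64_t s_start, uint64_t s_end)
    {
            uint64_t start = s_start & r->buf_mask;
            uint64_t end = s_end & r->buf_mask;
            uint64_t n;

            if (start == end) {
                    fprintf(stderr, "empty span, nothing copied\n");
                    return 0;
            }
            if (start > end) {      /* span wraps past the buffer end */
                    n = r->size_dw - start;
                    memcpy(out, &r->buf[start], n * sizeof(uint32_t));
                    memcpy(out + n, &r->buf[0], end * sizeof(uint32_t));
                    return n + end;
            }
            memcpy(out, &r->buf[start], (end - start) * sizeof(uint32_t));
            return end - start;
    }
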
In amdgpu_mux_resubmit_chunks():
 88		if (mux->ring_entry[i].ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
 95		DRM_ERROR("%s no low priority ring found\n", __func__);
 99		last_seq = atomic_read(&e->ring->fence_drv.last_seq);
105			amdgpu_fence_update_start_timestamp(e->ring,
109		    le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) {
110			if (chunk->cntl_offset <= e->ring->buf_mask)
111				amdgpu_ring_patch_cntl(e->ring,
113			if (chunk->ce_offset <= e->ring->buf_mask)
114				amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
115			if (chunk->de_offset <= e->ring->buf_mask)
116				amdgpu_ring_patch_de(e->ring, chunk->de_offset);
118		amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,

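Lines 110-116 patch a CNTL/CE/DE packet only when its recorded offset is at most buf_mask, i.e. when it was actually marked for this chunk (see the buf_mask + 1 sentinel set in amdgpu_ring_mux_start_ib() below). The surrounding walk replays only chunks whose fence has not signaled yet; a simplified sketch of that skip-or-replay loop, with illustrative names and the usual signed-difference seqno comparison:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative chunk bookkeeping, not the driver's struct. */
    struct resub_chunk {
            uint64_t start, end;       /* sw-ring span of the submission */
            uint32_t sync_seq;         /* fence seqno emitted with it */
            struct resub_chunk *next;
    };

    /* Replay every chunk whose fence has not signaled; chunks at or below
     * last_seq already completed on the hardware and are skipped. */
    static void resubmit_unsignaled(struct resub_chunk *head, uint32_t last_seq,
                                    void (*replay)(uint64_t start, uint64_t end))
    {
            for (struct resub_chunk *c = head; c != NULL; c = c->next) {
                    if ((int32_t)(c->sync_seq - last_seq) <= 0)
                            continue;       /* already signaled */
                    replay(c->start, c->end);
            }
    }
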
In amdgpu_ring_mux_init():
150	int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
153		mux->real_ring = ring;

In amdgpu_ring_mux_add_sw_ring():
195	int amdgpu_ring_mux_add_sw_ring(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
200		DRM_ERROR("add sw ring exceeding max entry size\n");
205		ring->entry_index = mux->num_ring_entries;
206		e->ring = ring;

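Registration is a bounds-checked append: the ring stores its slot in entry_index so that amdgpu_ring_mux_sw_entry() can later resolve it in O(1) (lines 46-47 above). A compact sketch of the same idea; MAX_ENTRIES and all names are illustrative:

    #include <stdio.h>

    #define MAX_ENTRIES 3   /* illustrative cap */

    struct sketch_entry { void *ring; };

    struct sketch_mux {
            struct sketch_entry entries[MAX_ENTRIES];
            unsigned int num_entries;
    };

    static int mux_add_ring(struct sketch_mux *mux, void *ring,
                            unsigned int *entry_index)
    {
            if (mux->num_entries >= MAX_ENTRIES) {
                    fprintf(stderr, "too many rings registered\n");
                    return -1;
            }
            *entry_index = mux->num_entries;   /* the ring remembers its slot */
            mux->entries[mux->num_entries++].ring = ring;
            return 0;
    }
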
In amdgpu_ring_mux_set_wptr():
213	void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr)
219		if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT)
222		e = amdgpu_ring_mux_sw_entry(mux, ring);
224		DRM_ERROR("cannot find entry for sw ring\n");
230		if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && mux->pending_trailing_fence_signaled) {
237		if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit)
243		if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT || mux->wptr_resubmit < wptr) {
244			amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr);

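Line 237 bails out for a low-priority ring while a resubmission is still replaying packets past its sw_cptr; otherwise lines 243-244 copy the fresh packets in [sw_cptr, wptr) onto the shared real ring. For the read-pointer translation later on, the entry also has to record where that span landed on the real ring. A simplified sketch of that bookkeeping, with illustrative names:

    #include <stdint.h>

    struct sketch_entry {
            uint64_t sw_cptr, sw_wptr;        /* sw-ring copy / write pointers */
            uint64_t start_in_hw, end_in_hw;  /* span location on the real ring */
    };

    /* Advance one sw ring; copy() writes [from, to) to the real ring and
     * returns the number of dwords written. */
    static void mux_advance(struct sketch_entry *e, uint64_t new_wptr,
                            uint64_t *real_wptr,
                            uint64_t (*copy)(uint64_t from, uint64_t to))
    {
            e->sw_cptr = e->sw_wptr;          /* older packets already copied */
            e->sw_wptr = new_wptr;
            e->start_in_hw = *real_wptr;
            *real_wptr += copy(e->sw_cptr, new_wptr);
            e->end_in_hw = *real_wptr;
    }
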
In amdgpu_ring_mux_get_wptr():
253	u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
257		e = amdgpu_ring_mux_sw_entry(mux, ring);
259		DRM_ERROR("cannot find entry for sw ring\n");

In amdgpu_ring_mux_get_rptr():
267	 * amdgpu_ring_mux_get_rptr - get the read pointer of the software ring
269	 * @ring: the software ring for which we calculate the read pointer
272	 * write data onto the real ring buffer. After overwriting on the real ring, we
282	u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
287		e = amdgpu_ring_mux_sw_entry(mux, ring);
305		e->sw_rptr = (e->sw_cptr + offset) & ring->buf_mask;

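Line 305 is the interesting case: when the real ring's read pointer sits inside this ring's copied span, the progress (offset) is added to sw_cptr and masked back into the software ring. A plausible sketch of the full three-way mapping, assuming the entry records where its span starts and ends on the real ring (as in the bookkeeping sketch above):

    #include <stdint.h>

    /* Map the real ring's read pointer back onto one software ring. */
    static uint64_t sw_rptr_from_hw(uint64_t r_rptr,
                                    uint64_t start_in_hw, uint64_t end_in_hw,
                                    uint64_t sw_cptr, uint64_t sw_wptr,
                                    uint64_t buf_mask)
    {
            if (r_rptr >= start_in_hw && r_rptr < end_in_hw)
                    /* CP is inside this span: translate the progress */
                    return (sw_cptr + (r_rptr - start_in_hw)) & buf_mask;
            if (r_rptr >= end_in_hw)
                    return sw_wptr;   /* span fully consumed */
            return sw_cptr;           /* span not reached yet */
    }
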
In amdgpu_sw_ring_get_rptr_gfx():
316	u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring)
318		struct amdgpu_device *adev = ring->adev;
321		WARN_ON(!ring->is_sw_ring);
322		return amdgpu_ring_mux_get_rptr(mux, ring);

In amdgpu_sw_ring_get_wptr_gfx():
325	u64 amdgpu_sw_ring_get_wptr_gfx(struct amdgpu_ring *ring)
327		struct amdgpu_device *adev = ring->adev;
330		WARN_ON(!ring->is_sw_ring);
331		return amdgpu_ring_mux_get_wptr(mux, ring);

In amdgpu_sw_ring_set_wptr_gfx():
334	void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring)
336		struct amdgpu_device *adev = ring->adev;
339		WARN_ON(!ring->is_sw_ring);
340		amdgpu_ring_mux_set_wptr(mux, ring, ring->wptr);

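All three helpers are thin wrappers: warn if the ring is not a software ring, then forward to the mux. They are meant to be installed as a software ring's callbacks; a hypothetical wiring sketch (field names follow struct amdgpu_ring_funcs, but the actual table lives in the gfx IP code and may differ):

    /* Hypothetical sketch; assumes the usual amdgpu headers. */
    static const struct amdgpu_ring_funcs sw_ring_funcs_sketch = {
            .get_rptr = amdgpu_sw_ring_get_rptr_gfx,   /* reads via the mux */
            .get_wptr = amdgpu_sw_ring_get_wptr_gfx,
            .set_wptr = amdgpu_sw_ring_set_wptr_gfx,   /* copies to the real ring */
            .insert_nop = amdgpu_sw_ring_insert_nop,
    };
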
In amdgpu_sw_ring_insert_nop():
344	void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
346		WARN_ON(!ring->is_sw_ring);

In amdgpu_mcbp_scan():
361	/* Preempt only when a low-priority ring has a long-unsignaled fence and the high-priority ring has no fence in flight. */
364		struct amdgpu_ring *ring;
369		ring = mux->ring_entry[i].ring;
370		if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT &&
371		    amdgpu_fence_count_emitted(ring) > 0)
373		if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT &&
374		    amdgpu_fence_last_unsignaled_time_us(ring) >

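Reading the two conditions together: if a high-priority ring already has fences emitted, there is nothing to gain from preempting again, so the scan gives up immediately; preemption is only worth triggering when some low-priority fence has stayed unsignaled past a threshold. A stand-alone sketch of that decision (the struct and threshold name are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define STUCK_THRESHOLD_US 1000   /* illustrative, not the driver's value */

    struct sketch_ring_state {
            bool high_prio;
            uint32_t fences_emitted;         /* emitted, not yet signaled */
            uint64_t oldest_unsignaled_us;   /* age of the oldest pending fence */
    };

    static bool need_preempt(const struct sketch_ring_state *r, int n)
    {
            bool low_stuck = false;

            for (int i = 0; i < n; i++) {
                    /* high-priority work already on the ring: do not preempt */
                    if (r[i].high_prio && r[i].fences_emitted > 0)
                            return false;
                    if (!r[i].high_prio &&
                        r[i].oldest_unsignaled_us > STUCK_THRESHOLD_US)
                            low_stuck = true;
            }
            return low_stuck;
    }
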
In amdgpu_sw_ring_ib_begin():
393	void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
395		struct amdgpu_device *adev = ring->adev;
398		WARN_ON(!ring->is_sw_ring);
399		if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
405		amdgpu_ring_mux_start_ib(mux, ring);

In amdgpu_sw_ring_ib_end():
408	void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
410		struct amdgpu_device *adev = ring->adev;
413		WARN_ON(!ring->is_sw_ring);
414		if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
416		amdgpu_ring_mux_end_ib(mux, ring);

In amdgpu_sw_ring_ib_mark_offset():
419	void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type)
421		struct amdgpu_device *adev = ring->adev;
425		if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
428		offset = ring->wptr & ring->buf_mask;
430		amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type);

In amdgpu_ring_mux_start_ib():
433	void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
442		e = amdgpu_ring_mux_sw_entry(mux, ring);
454		chunk->start = ring->wptr;
456		chunk->cntl_offset = ring->buf_mask + 1;
457		chunk->de_offset = ring->buf_mask + 1;
458		chunk->ce_offset = ring->buf_mask + 1;

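Valid offsets into the ring are 0..buf_mask, so buf_mask + 1 serves as an out-of-range "not recorded" sentinel; this is exactly what the <= buf_mask checks during resubmission (lines 110-115 above) test before patching. A small sketch of the convention (illustrative types):

    #include <stdbool.h>
    #include <stdint.h>

    struct sketch_chunk {
            uint64_t start, end;
            uint64_t cntl_offset, ce_offset, de_offset;
    };

    static void chunk_open(struct sketch_chunk *c, uint64_t wptr, uint64_t buf_mask)
    {
            c->start = wptr;
            c->cntl_offset = buf_mask + 1;   /* not marked yet */
            c->ce_offset = buf_mask + 1;
            c->de_offset = buf_mask + 1;
    }

    static bool offset_recorded(uint64_t offset, uint64_t buf_mask)
    {
            return offset <= buf_mask;       /* mirrors the resubmit checks */
    }
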
In scan_and_remove_signaled_chunk():
462	static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
468		e = amdgpu_ring_mux_sw_entry(mux, ring);
474		last_seq = atomic_read(&ring->fence_drv.last_seq);

In amdgpu_ring_mux_ib_mark_offset():
485					struct amdgpu_ring *ring, u64 offset,
491		e = amdgpu_ring_mux_sw_entry(mux, ring);

In amdgpu_ring_mux_end_ib():
519	void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
524		e = amdgpu_ring_mux_sw_entry(mux, ring);
536		chunk->end = ring->wptr;
537		chunk->sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
539		scan_and_remove_signaled_chunk(mux, ring);

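Closing a chunk records the current wptr as its end and the fence seqno it will signal with (sync_seq), then drops chunks that have already signaled. A sketch of that cleanup using the standard pointer-to-pointer unlink idiom (illustrative types; the driver iterates a kernel list instead):

    #include <stdint.h>
    #include <stdlib.h>

    struct fence_chunk {
            uint32_t sync_seq;
            struct fence_chunk *next;
    };

    /* Free every chunk whose fence seqno is at or below last_seq. */
    static void drop_signaled(struct fence_chunk **head, uint32_t last_seq)
    {
            struct fence_chunk **pp = head;

            while (*pp) {
                    if ((int32_t)((*pp)->sync_seq - last_seq) <= 0) {
                            struct fence_chunk *done = *pp;
                            *pp = done->next;
                            free(done);
                    } else {
                            pp = &(*pp)->next;
                    }
            }
    }
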
In amdgpu_mcbp_handle_trailing_fence_irq():
545		struct amdgpu_ring *ring = NULL;
556		if (e->ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
557			ring = e->ring;
562		if (!ring) {
563			DRM_ERROR("cannot find low priority ring\n");
567		amdgpu_fence_process(ring);
568		if (amdgpu_fence_count_emitted(ring) > 0) {
570			mux->seqno_to_resubmit = ring->fence_drv.sync_seq;

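When the trailing (preemption) fence fires, the handler finds the low-priority ring, processes its fences, and checks whether any remain emitted but unsignaled; if so, that work was cut off mid-stream and is scheduled for replay up to sync_seq. A condensed sketch of just that decision:

    #include <stdbool.h>
    #include <stdint.h>

    /* Decide whether preempted low-priority work must be replayed, and up to
     * which fence seqno (illustrative helper, not the driver's API). */
    static bool must_resubmit(uint32_t fences_still_emitted, uint32_t sync_seq,
                              uint32_t *seqno_to_resubmit)
    {
            if (fences_still_emitted == 0)
                    return false;           /* everything finished in time */
            *seqno_to_resubmit = sync_seq;  /* replay up to the last emitted fence */
            return true;
    }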