Lines Matching full:ring
94 /* Direct submission to the ring buffer during init and reset. */
119 /* sync_seq is protected by ring emission lock */
136 * are no longer in use by the associated ring on the GPU and
144 struct amdgpu_ring *ring;
155 void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
156 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
160 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
161 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
168 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
170 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
172 bool amdgpu_fence_process(struct amdgpu_ring *ring);
173 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
174 signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
177 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
181 u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring);
182 void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
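The *_polling variants above serve direct submissions (see the comment at line 94) that poll for completion instead of sleeping on the fence interrupt. A minimal sketch of how emit and wait pair up, in the style of the KIQ register-access paths; the dword count, timeout macro, and function name are illustrative, not from the header:

#include "amdgpu.h"

/* Illustrative timeout; real callers use their own constants. */
#define EXAMPLE_POLL_TIMEOUT	5000

/* Sketch: emit a polled fence for a small direct submission and busy-wait on it. */
static int example_submit_and_poll(struct amdgpu_ring *ring)
{
	uint32_t seq;
	int r;

	r = amdgpu_ring_alloc(ring, 32);	/* reserve space for the packets */
	if (r)
		return r;

	/* ... emit ring-specific packets here ... */

	r = amdgpu_fence_emit_polling(ring, &seq, EXAMPLE_POLL_TIMEOUT);
	if (r) {
		amdgpu_ring_undo(ring);		/* drop what was written */
		return r;
	}
	amdgpu_ring_commit(ring);

	/* Returns the remaining timeout; 0 means the fence never signaled. */
	if (amdgpu_fence_wait_polling(ring, seq, EXAMPLE_POLL_TIMEOUT) < 1)
		return -ETIMEDOUT;

	return 0;
}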
189 /* provided by hw blocks that expose a ring buffer for commands */
195 * use ring buffers. The type field just identifies which component the
196 * ring buffer is associated with.
207 * that initializes the ring.
217 * Optional extra space in bytes that is added to the ring size
218 * when allocating the BO that holds the contents of the ring.
219 * This space isn't used for command submission to the ring,
222 * specific ring to initialize this space.
226 /* ring read/write ptr handling */
227 u64 (*get_rptr)(struct amdgpu_ring *ring);
228 u64 (*get_wptr)(struct amdgpu_ring *ring);
229 void (*set_wptr)(struct amdgpu_ring *ring);
241 void (*emit_ib)(struct amdgpu_ring *ring,
245 void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
247 void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
248 void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
250 void (*emit_hdp_flush)(struct amdgpu_ring *ring);
251 void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
256 int (*test_ring)(struct amdgpu_ring *ring);
257 int (*test_ib)(struct amdgpu_ring *ring, long timeout);
259 void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
260 void (*insert_start)(struct amdgpu_ring *ring);
261 void (*insert_end)(struct amdgpu_ring *ring);
263 void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
264 unsigned (*init_cond_exec)(struct amdgpu_ring *ring, uint64_t addr);
266 void (*begin_use)(struct amdgpu_ring *ring);
267 void (*end_use)(struct amdgpu_ring *ring);
268 void (*emit_switch_buffer) (struct amdgpu_ring *ring);
269 void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
270 void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va,
272 void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
274 void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
275 void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
277 void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
280 void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
282 /* Try to soft recover the ring to make the fence signal */
283 void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
284 int (*preempt_ib)(struct amdgpu_ring *ring);
285 void (*emit_mem_sync)(struct amdgpu_ring *ring);
286 void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
287 void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
288 void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
289 void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
290 int (*reset)(struct amdgpu_ring *ring, unsigned int vmid,
292 void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
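Each IP block that exposes a ring fills one of these tables and points its rings at it. A minimal sketch of what a hypothetical block's table might look like, keeping only the read/write-pointer plumbing and reusing the generic helpers declared later in this header; the my_* names and stub bodies are placeholders, not from the header:

#include "amdgpu.h"

/* Placeholder callbacks for a hypothetical IP block. Real implementations
 * read the hardware or writeback copies of the ring pointers.
 */
static u64 my_ring_get_rptr(struct amdgpu_ring *ring)
{
	return 0;		/* stub: would return the GPU's current read pointer */
}

static u64 my_ring_get_wptr(struct amdgpu_ring *ring)
{
	return ring->wptr;	/* stub: CPU-side copy of the write pointer */
}

static void my_ring_set_wptr(struct amdgpu_ring *ring)
{
	/* stub: would ring a doorbell or write a register with ring->wptr */
}

static const struct amdgpu_ring_funcs my_ring_funcs = {
	.type = AMDGPU_RING_TYPE_GFX,		/* which component owns the ring */
	.get_rptr = my_ring_get_rptr,
	.get_wptr = my_ring_get_wptr,
	.set_wptr = my_ring_set_wptr,
	.insert_nop = amdgpu_ring_insert_nop,	/* generic helper from this header */
	.pad_ib = amdgpu_ring_generic_pad_ib,	/* generic helper from this header */
};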
296 * amdgpu_ring - Holds ring information
305 uint32_t *ring;
316 * This is part of the Ring buffer implementation and represents the
333 * Maximum number of DWords for ring allocation. This information is
334 * provided at the ring initialization time, and each IP block can
344 * ring. This value is updated based on the ring manipulation.
364 * thresholding. Buffer mask initialized during the ring buffer
453 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
454 void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
455 void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
456 void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
457 void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
458 void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);
460 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
461 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
462 void amdgpu_ring_commit(struct amdgpu_ring *ring);
463 void amdgpu_ring_undo(struct amdgpu_ring *ring);
464 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
468 void amdgpu_ring_fini(struct amdgpu_ring *ring);
469 void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
472 bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
475 static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
478 *ring->cond_exe_cpu_addr = cond_exec;
481 static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
483 memset32(ring->ring, ring->funcs->nop, ring->buf_mask + 1);
486 static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
488 ring->ring[ring->wptr++ & ring->buf_mask] = v;
489 ring->wptr &= ring->ptr_mask;
490 ring->count_dw--;
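amdgpu_ring_write() only stores one dword and advances the write pointer; the space must be reserved first and the submission closed afterwards. A minimal sketch of the usual alloc/write/commit sequence; the dword count and the use of the NOP value are illustrative:

#include "amdgpu.h"

/* Sketch: reserve space, write a few dwords, then commit them to the GPU. */
static int example_direct_submit(struct amdgpu_ring *ring)
{
	unsigned int i;
	int r;

	r = amdgpu_ring_alloc(ring, 4);		/* make room for 4 dwords */
	if (r)
		return r;

	for (i = 0; i < 4; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);	/* engine NOP packet */

	amdgpu_ring_commit(ring);	/* pad to alignment and update the hw wptr */
	return 0;
}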
493 static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
498 occupied = ring->wptr & ring->buf_mask;
499 chunk1 = ring->buf_mask + 1 - occupied;
506 memcpy(&ring->ring[occupied], src, chunk1);
510 memcpy(ring->ring, src, chunk2);
513 ring->wptr += count_dw;
514 ring->wptr &= ring->ptr_mask;
515 ring->count_dw -= count_dw;
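When a packet is already laid out in memory, amdgpu_ring_write_multiple() copies it into the ring in one call, splitting the copy at the end of the buffer when it wraps. A minimal sketch of a caller; the parameter names are illustrative:

#include "amdgpu.h"

/* Sketch: push a pre-built packet into the ring in a single copy. */
static int example_submit_packet(struct amdgpu_ring *ring,
				 u32 *pkt, unsigned int ndw)
{
	int r;

	r = amdgpu_ring_alloc(ring, ndw);	/* reserve exactly ndw dwords */
	if (r)
		return r;

	amdgpu_ring_write_multiple(ring, pkt, ndw);	/* wrap handled inside */
	amdgpu_ring_commit(ring);
	return 0;
}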
520 * @ring: amdgpu_ring structure
525 static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
530 if (!ring->funcs->init_cond_exec)
533 WARN_ON(offset > ring->buf_mask);
534 WARN_ON(ring->ring[offset] != 0);
536 cur = (ring->wptr - 1) & ring->buf_mask;
538 cur += ring->ring_size >> 2;
539 ring->ring[offset] = cur - offset;
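init_cond_exec() emits a conditional-execution packet and returns the ring offset of its size dword; once the preemptible part of the frame has been written, amdgpu_ring_patch_cond_exec() back-patches that dword with the distance to skip. A minimal sketch of the pairing; the address argument and the surrounding emission are illustrative:

#include "amdgpu.h"

/* Sketch: bracket a preemptible stretch of the frame with cond_exec. */
static void example_cond_exec(struct amdgpu_ring *ring, u64 cond_exec_gpu_addr)
{
	unsigned int offset;

	if (!ring->funcs->init_cond_exec)
		return;			/* engine does not support this */

	/* Emit the packet and remember where its size dword lives. */
	offset = ring->funcs->init_cond_exec(ring, cond_exec_gpu_addr);

	/* ... emit the preemptible packets of the frame here ... */

	/* Patch the size dword so the packet skips exactly what was emitted. */
	amdgpu_ring_patch_cond_exec(ring, offset);
}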
542 int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
545 struct amdgpu_ring *ring);
547 int amdgpu_ring_init_mqd(struct amdgpu_ring *ring);
565 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
571 bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
572 void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
574 void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
576 int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
578 bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
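amdgpu_ring_test_helper() runs the ring's ->test_ring() callback and records whether the ring is usable by the scheduler; IP blocks typically call it for each of their rings once the hardware is up. A minimal sketch of such a loop; the array and count are hypothetical names:

#include "amdgpu.h"

/* Sketch: smoke-test a block's rings after hardware init (names hypothetical). */
static int example_test_rings(struct amdgpu_ring **rings, unsigned int num_rings)
{
	unsigned int i;
	int r;

	for (i = 0; i < num_rings; i++) {
		/* Runs ->test_ring() and updates the ring's scheduler state. */
		r = amdgpu_ring_test_helper(rings[i]);
		if (r)
			return r;
	}
	return 0;
}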