Lines matching full:ring in drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
41 * Most engines on the GPU are fed via ring buffers. Ring
47 * pointers are equal, the ring is idle. When the host
48 * writes commands to the ring buffer, it increments the
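The comment fragments above describe the classic producer/consumer ring protocol: the host advances the write pointer as it produces commands, the GPU advances the read pointer as it consumes them, and equal pointers mean idle. A minimal stand-alone sketch of that protocol (types and names here are illustrative, not driver code):

#include <stdbool.h>
#include <stdint.h>

struct toy_ring {
	uint32_t rptr;     /* read pointer: advanced by the GPU as it consumes */
	uint32_t wptr;     /* write pointer: advanced by the host as it produces */
	uint32_t ptr_mask; /* ring size in dwords minus one (power-of-two ring) */
};

/* The ring is idle when both pointers match. */
static bool toy_ring_idle(const struct toy_ring *r)
{
	return r->rptr == r->wptr;
}

/* Writing commands advances the write pointer, wrapping at the ring end. */
static void toy_ring_advance(struct toy_ring *r, uint32_t ndw)
{
	r->wptr = (r->wptr + ndw) & r->ptr_mask;
}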
56 * @type: ring type for which to return the limit.
74 * amdgpu_ring_alloc - allocate space on the ring buffer
76 * @ring: amdgpu_ring structure holding ring information
77 * @ndw: number of dwords to allocate in the ring buffer
79 * Allocate @ndw dwords in the ring buffer (all asics).
82 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw) in amdgpu_ring_alloc() argument
86 ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask; in amdgpu_ring_alloc()
91 if (WARN_ON_ONCE(ndw > ring->max_dw)) in amdgpu_ring_alloc()
94 ring->count_dw = ndw; in amdgpu_ring_alloc()
95 ring->wptr_old = ring->wptr; in amdgpu_ring_alloc()
97 if (ring->funcs->begin_use) in amdgpu_ring_alloc()
98 ring->funcs->begin_use(ring); in amdgpu_ring_alloc()
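The mask arithmetic above rounds the request up to the ring's submission alignment. A standalone worked example (the align_mask value is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int align_mask = 0x3f; /* 64-dword alignment, for illustration */
	unsigned int ndw = 70;

	/* Same round-up as amdgpu_ring_alloc(): 70 dwords become 128. */
	ndw = (ndw + align_mask) & ~align_mask;
	printf("ndw = %u\n", ndw); /* prints 128 */
	return 0;
}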
104 * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit
106 * @ring: amdgpu_ring structure holding ring information
107 * @ndw: number of dwords to allocate in the ring buffer
109 * Allocate @ndw dwords in the ring buffer (all asics).
113 static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw) in amdgpu_ring_alloc_reemit() argument
117 ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask; in amdgpu_ring_alloc_reemit()
119 ring->count_dw = ndw; in amdgpu_ring_alloc_reemit()
120 ring->wptr_old = ring->wptr; in amdgpu_ring_alloc_reemit()
122 if (ring->funcs->begin_use) in amdgpu_ring_alloc_reemit()
123 ring->funcs->begin_use(ring); in amdgpu_ring_alloc_reemit()
128 * @ring: amdgpu_ring structure holding ring information
133 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) in amdgpu_ring_insert_nop() argument
137 occupied = ring->wptr & ring->buf_mask; in amdgpu_ring_insert_nop()
138 chunk1 = ring->buf_mask + 1 - occupied; in amdgpu_ring_insert_nop()
143 memset32(&ring->ring[occupied], ring->funcs->nop, chunk1); in amdgpu_ring_insert_nop()
146 memset32(ring->ring, ring->funcs->nop, chunk2); in amdgpu_ring_insert_nop()
148 ring->wptr += count; in amdgpu_ring_insert_nop()
149 ring->wptr &= ring->ptr_mask; in amdgpu_ring_insert_nop()
150 ring->count_dw -= count; in amdgpu_ring_insert_nop()
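amdgpu_ring_insert_nop() has to handle the fill wrapping past the end of the buffer; in the elided lines chunk1 is clamped to count before chunk2 is computed. A runnable toy that exercises both memset halves:

#include <stdio.h>
#include <stdint.h>

#define RING_DW 16 /* toy ring of 16 dwords, so buf_mask = 15 */

int main(void)
{
	uint32_t ring[RING_DW];
	uint32_t buf_mask = RING_DW - 1, nop = 0xdeadbeef;
	uint32_t wptr = 13, count = 6; /* write 6 nops starting near the end */

	uint32_t occupied = wptr & buf_mask;       /* 13 */
	uint32_t chunk1 = buf_mask + 1 - occupied; /* 3 dwords up to the end */
	if (chunk1 > count)
		chunk1 = count;                    /* clamp, as the driver does */
	uint32_t chunk2 = count - chunk1;          /* 3 dwords after wrapping */

	for (uint32_t i = 0; i < chunk1; i++)
		ring[occupied + i] = nop; /* memset32(&ring[occupied], ...) */
	for (uint32_t i = 0; i < chunk2; i++)
		ring[i] = nop;            /* memset32(ring, ...) after the wrap */

	printf("chunk1=%u chunk2=%u\n", chunk1, chunk2); /* chunk1=3 chunk2=3 */
	return 0;
}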
156 * @ring: amdgpu_ring structure holding ring information
161 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) in amdgpu_ring_generic_pad_ib() argument
163 u32 align_mask = ring->funcs->align_mask; in amdgpu_ring_generic_pad_ib()
169 memset32(&ib->ptr[ib->length_dw], ring->funcs->nop, count); in amdgpu_ring_generic_pad_ib()
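The pad count that feeds this memset32 can be derived from the IB length and the alignment mask; a worked example with an assumed 8-dword alignment:

#include <stdio.h>

int main(void)
{
	unsigned int align_mask = 7; /* 8-dword alignment, for illustration */
	unsigned int length_dw = 13; /* unpadded IB length */

	/* Pad up to the next aligned boundary; already-aligned IBs get 0. */
	unsigned int pad = (align_mask + 1 - (length_dw & align_mask)) & align_mask;

	printf("pad = %u\n", pad); /* prints 3: 13 + 3 = 16 dwords */
	return 0;
}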
177 * commands on the ring buffer
179 * @ring: amdgpu_ring structure holding ring information
182 * execute new commands on the ring buffer (all asics).
184 void amdgpu_ring_commit(struct amdgpu_ring *ring) in amdgpu_ring_commit() argument
188 if (ring->count_dw < 0) in amdgpu_ring_commit()
189 DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); in amdgpu_ring_commit()
192 count = ring->funcs->align_mask + 1 - in amdgpu_ring_commit()
193 (ring->wptr & ring->funcs->align_mask); in amdgpu_ring_commit()
194 count &= ring->funcs->align_mask; in amdgpu_ring_commit()
197 ring->funcs->insert_nop(ring, count); in amdgpu_ring_commit()
200 amdgpu_ring_set_wptr(ring); in amdgpu_ring_commit()
202 if (ring->funcs->end_use) in amdgpu_ring_commit()
203 ring->funcs->end_use(ring); in amdgpu_ring_commit()
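The count computation in amdgpu_ring_commit() pads the write pointer to the same alignment before it is handed to the hardware; a worked example:

#include <stdio.h>

int main(void)
{
	unsigned long align_mask = 0xf; /* 16-dword alignment, for illustration */
	unsigned long wptr = 0x123;

	/* Same expression as amdgpu_ring_commit() above. */
	unsigned long count = (align_mask + 1 - (wptr & align_mask)) & align_mask;

	printf("count=%lu, padded wptr=0x%lx\n", count, wptr + count);
	/* count=13, padded wptr=0x130 */
	return 0;
}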
209 * @ring: amdgpu_ring structure holding ring information
213 void amdgpu_ring_undo(struct amdgpu_ring *ring) in amdgpu_ring_undo() argument
215 ring->wptr = ring->wptr_old; in amdgpu_ring_undo()
217 if (ring->funcs->end_use) in amdgpu_ring_undo()
218 ring->funcs->end_use(ring); in amdgpu_ring_undo()
221 #define amdgpu_ring_get_gpu_addr(ring, offset) \ argument
222 (ring->adev->wb.gpu_addr + offset * 4)
224 #define amdgpu_ring_get_cpu_addr(ring, offset) \ argument
225 (&ring->adev->wb.wb[offset])
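Both macros index the same writeback area: one buffer of 32-bit slots, visible to the GPU at wb.gpu_addr and to the CPU as the wb[] array, so slot N sits at byte offset N * 4. A toy illustration (the base address is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t wb_gpu_addr = 0x100000; /* assumed base, for illustration */
	uint32_t wb[256] = { 0 };        /* CPU view: array of 32-bit slots */
	unsigned int offset = 42;        /* a writeback slot index */

	uint64_t gpu = wb_gpu_addr + offset * 4; /* amdgpu_ring_get_gpu_addr() */
	uint32_t *cpu = &wb[offset];             /* amdgpu_ring_get_cpu_addr() */

	printf("slot %u: gpu=0x%llx, cpu=wb[%u]\n",
	       offset, (unsigned long long)gpu, offset);
	(void)cpu;
	return 0;
}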
228 * amdgpu_ring_init - init driver ring struct.
231 * @ring: amdgpu_ring structure holding ring information
232 * @max_dw: maximum number of dw for ring alloc
233 * @irq_src: interrupt source to use for this ring
234 * @irq_type: interrupt type to use for this ring
235 * @hw_prio: ring priority (NORMAL/HIGH)
238 * Initialize the driver information for the selected ring (all asics).
241 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, in amdgpu_ring_init() argument
256 * KIQ tasks get submitted directly to the ring. in amdgpu_ring_init()
258 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) in amdgpu_ring_init()
260 if (ring->funcs->type == AMDGPU_RING_TYPE_MES) in amdgpu_ring_init()
262 else if (ring == &adev->sdma.instance[0].page) in amdgpu_ring_init()
265 if (ring->adev == NULL) { in amdgpu_ring_init()
269 ring->adev = adev; in amdgpu_ring_init()
270 ring->num_hw_submission = sched_hw_submission; in amdgpu_ring_init()
271 ring->sched_score = sched_score; in amdgpu_ring_init()
272 ring->vmid_wait = dma_fence_get_stub(); in amdgpu_ring_init()
274 ring->idx = adev->num_rings++; in amdgpu_ring_init()
275 adev->rings[ring->idx] = ring; in amdgpu_ring_init()
277 r = amdgpu_fence_driver_init_ring(ring); in amdgpu_ring_init()
282 r = amdgpu_device_wb_get(adev, &ring->rptr_offs); in amdgpu_ring_init()
284 dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); in amdgpu_ring_init()
288 r = amdgpu_device_wb_get(adev, &ring->wptr_offs); in amdgpu_ring_init()
290 dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); in amdgpu_ring_init()
294 r = amdgpu_device_wb_get(adev, &ring->fence_offs); in amdgpu_ring_init()
296 dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); in amdgpu_ring_init()
300 r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs); in amdgpu_ring_init()
302 dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r); in amdgpu_ring_init()
306 r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs); in amdgpu_ring_init()
308 dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r); in amdgpu_ring_init()
312 ring->fence_gpu_addr = in amdgpu_ring_init()
313 amdgpu_ring_get_gpu_addr(ring, ring->fence_offs); in amdgpu_ring_init()
314 ring->fence_cpu_addr = in amdgpu_ring_init()
315 amdgpu_ring_get_cpu_addr(ring, ring->fence_offs); in amdgpu_ring_init()
317 ring->rptr_gpu_addr = in amdgpu_ring_init()
318 amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs); in amdgpu_ring_init()
319 ring->rptr_cpu_addr = in amdgpu_ring_init()
320 amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs); in amdgpu_ring_init()
322 ring->wptr_gpu_addr = in amdgpu_ring_init()
323 amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs); in amdgpu_ring_init()
324 ring->wptr_cpu_addr = in amdgpu_ring_init()
325 amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs); in amdgpu_ring_init()
327 ring->trail_fence_gpu_addr = in amdgpu_ring_init()
328 amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs); in amdgpu_ring_init()
329 ring->trail_fence_cpu_addr = in amdgpu_ring_init()
330 amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs); in amdgpu_ring_init()
332 ring->cond_exe_gpu_addr = in amdgpu_ring_init()
333 amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs); in amdgpu_ring_init()
334 ring->cond_exe_cpu_addr = in amdgpu_ring_init()
335 amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs); in amdgpu_ring_init()
338 *ring->cond_exe_cpu_addr = 1; in amdgpu_ring_init()
340 if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) { in amdgpu_ring_init()
341 r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); in amdgpu_ring_init()
347 max_ibs_dw = ring->funcs->emit_frame_size + in amdgpu_ring_init()
348 amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size; in amdgpu_ring_init()
349 max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask; in amdgpu_ring_init()
354 ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission); in amdgpu_ring_init()
356 ring->ring_size = roundup_pow_of_two(max_dw * 4); in amdgpu_ring_init()
357 ring->count_dw = (ring->ring_size - 4) >> 2; in amdgpu_ring_init()
358 /* ring buffer is empty now */ in amdgpu_ring_init()
359 ring->wptr = *ring->rptr_cpu_addr = 0; in amdgpu_ring_init()
362 ring->buf_mask = (ring->ring_size / 4) - 1; in amdgpu_ring_init()
363 ring->ptr_mask = ring->funcs->support_64bit_ptrs ? in amdgpu_ring_init()
364 0xffffffffffffffff : ring->buf_mask; in amdgpu_ring_init()
366 ring->cached_rptr = 0; in amdgpu_ring_init()
368 if (!ring->ring_backup) { in amdgpu_ring_init()
369 ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL); in amdgpu_ring_init()
370 if (!ring->ring_backup) in amdgpu_ring_init()
374 /* Allocate ring buffer */ in amdgpu_ring_init()
375 if (ring->ring_obj == NULL) { in amdgpu_ring_init()
376 r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes, in amdgpu_ring_init()
379 &ring->ring_obj, in amdgpu_ring_init()
380 &ring->gpu_addr, in amdgpu_ring_init()
381 (void **)&ring->ring); in amdgpu_ring_init()
383 dev_err(adev->dev, "(%d) ring create failed\n", r); in amdgpu_ring_init()
384 kvfree(ring->ring_backup); in amdgpu_ring_init()
387 amdgpu_ring_clear_ring(ring); in amdgpu_ring_init()
390 ring->max_dw = max_dw; in amdgpu_ring_init()
391 ring->hw_prio = hw_prio; in amdgpu_ring_init()
393 if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) { in amdgpu_ring_init()
394 hw_ip = ring->funcs->type; in amdgpu_ring_init()
397 &ring->sched; in amdgpu_ring_init()
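amdgpu_ring_init() sizes the buffer to a power of two so that indexing reduces to masking with buf_mask. A standalone recap of that arithmetic (the roundup helper is re-implemented here for illustration):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's roundup_pow_of_two(). */
static uint32_t roundup_pow2(uint32_t x)
{
	uint32_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t max_dw = 1000, sched_hw_submission = 2;

	uint32_t ring_size = roundup_pow2(max_dw * 4 * sched_hw_submission);
	uint32_t buf_mask = (ring_size / 4) - 1;

	printf("ring_size=%u bytes, buf_mask=0x%x\n", ring_size, buf_mask);
	/* ring_size=8192 bytes, buf_mask=0x7ff (2047 dwords) */
	return 0;
}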
404 * amdgpu_ring_fini - tear down the driver ring struct.
406 * @ring: amdgpu_ring structure holding ring information
408 * Tear down the driver information for the selected ring (all asics).
410 void amdgpu_ring_fini(struct amdgpu_ring *ring) in amdgpu_ring_fini() argument
413 /* Don't tear down a ring that was never initialized */ in amdgpu_ring_fini()
414 if (!(ring->adev) || !(ring->adev->rings[ring->idx])) in amdgpu_ring_fini()
417 ring->sched.ready = false; in amdgpu_ring_fini()
419 amdgpu_device_wb_free(ring->adev, ring->rptr_offs); in amdgpu_ring_fini()
420 amdgpu_device_wb_free(ring->adev, ring->wptr_offs); in amdgpu_ring_fini()
422 amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs); in amdgpu_ring_fini()
423 amdgpu_device_wb_free(ring->adev, ring->fence_offs); in amdgpu_ring_fini()
425 amdgpu_bo_free_kernel(&ring->ring_obj, in amdgpu_ring_fini()
426 &ring->gpu_addr, in amdgpu_ring_fini()
427 (void **)&ring->ring); in amdgpu_ring_fini()
428 kvfree(ring->ring_backup); in amdgpu_ring_fini()
429 ring->ring_backup = NULL; in amdgpu_ring_fini()
431 dma_fence_put(ring->vmid_wait); in amdgpu_ring_fini()
432 ring->vmid_wait = NULL; in amdgpu_ring_fini()
433 ring->me = 0; in amdgpu_ring_fini()
437 * amdgpu_ring_emit_reg_write_reg_wait_helper - emit a register write followed by a register wait
439 * @ring: ring to write to
448 void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring, in amdgpu_ring_emit_reg_write_reg_wait_helper() argument
452 amdgpu_ring_emit_wreg(ring, reg0, ref); in amdgpu_ring_emit_reg_write_reg_wait_helper()
453 amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); in amdgpu_ring_emit_reg_write_reg_wait_helper()
457 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
459 * @ring: ring to try the recovery on
463 * Tries to get a ring proceeding again when it is stuck.
465 bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, in amdgpu_ring_soft_recovery() argument
474 if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence) in amdgpu_ring_soft_recovery()
484 ring->funcs->soft_recovery(ring, vmid); in amdgpu_ring_soft_recovery()
489 atomic_inc(&ring->adev->gpu_reset_counter); in amdgpu_ring_soft_recovery()
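In the elided lines the driver repeatedly kicks the soft-recovery callback until the guilty fence signals or a short deadline passes. A hedged, compilable sketch of that shape (the toy helpers and the 10 ms budget are illustrative assumptions, not the exact source):

#include <stdbool.h>
#include <stdint.h>

/* Toy stand-ins for dma_fence_is_signaled()/ktime and the ring callback. */
extern bool toy_fence_signaled(void *fence);
extern uint64_t toy_now_us(void);
extern void toy_soft_recovery_kick(unsigned int vmid);

static bool toy_soft_recover(void *fence, unsigned int vmid)
{
	uint64_t deadline = toy_now_us() + 10000; /* assumed ~10 ms budget */

	while (!toy_fence_signaled(fence) && toy_now_us() < deadline)
		toy_soft_recovery_kick(vmid); /* ring->funcs->soft_recovery() analogue */

	return toy_fence_signaled(fence); /* true: the ring is moving again */
}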
503 struct amdgpu_ring *ring = file_inode(f)->i_private; in amdgpu_ras_cper_debugfs_read() local
533 r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev, in amdgpu_ras_cper_debugfs_read()
547 r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev, RAS_CMD__GET_CPER_RECORD, in amdgpu_ras_cper_debugfs_read()
564 * followed by n dwords of ring data
569 struct amdgpu_ring *ring = file_inode(f)->i_private; in amdgpu_debugfs_ring_read() local
575 if (ring->funcs->type == AMDGPU_RING_TYPE_CPER && amdgpu_uniras_enabled(ring->adev)) in amdgpu_debugfs_ring_read()
584 if (ring->funcs->type == AMDGPU_RING_TYPE_CPER) in amdgpu_debugfs_ring_read()
585 mutex_lock(&ring->adev->cper.ring_lock); in amdgpu_debugfs_ring_read()
587 early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask; in amdgpu_debugfs_ring_read()
588 early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask; in amdgpu_debugfs_ring_read()
589 early[2] = ring->wptr & ring->buf_mask; in amdgpu_debugfs_ring_read()
603 if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) { in amdgpu_debugfs_ring_read()
605 if (*pos >= (ring->ring_size + 12)) in amdgpu_debugfs_ring_read()
608 value = ring->ring[(*pos - 12)/4]; in amdgpu_debugfs_ring_read()
622 size = ring->ring_size - (early[0] - early[1]); in amdgpu_debugfs_ring_read()
628 value = ring->ring[p]; in amdgpu_debugfs_ring_read()
639 p &= ring->ptr_mask; in amdgpu_debugfs_ring_read()
644 if (ring->funcs->type == AMDGPU_RING_TYPE_CPER) in amdgpu_debugfs_ring_read()
645 mutex_unlock(&ring->adev->cper.ring_lock); in amdgpu_debugfs_ring_read()
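Putting the pieces of amdgpu_debugfs_ring_read() together: the file it serves is a 12-byte header, the three masked pointers captured in early[], followed by the ring contents at one dword per four bytes (hence the ring_size + 12 file size below). A small demo of the offset mapping:

#include <stdio.h>

int main(void)
{
	unsigned long pos = 28; /* an example file offset */

	if (pos < 12)
		printf("header word %lu (rptr/wptr/driver wptr)\n", pos / 4);
	else
		printf("ring dword %lu\n", (pos - 12) / 4); /* prints 4 */
	return 0;
}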
653 struct amdgpu_ring *ring = file_inode(f)->i_private; in amdgpu_debugfs_virt_ring_read() local
658 if (ring->funcs->type == AMDGPU_RING_TYPE_CPER) in amdgpu_debugfs_virt_ring_read()
659 amdgpu_virt_req_ras_cper_dump(ring->adev, false); in amdgpu_debugfs_virt_ring_read()
679 struct amdgpu_ring *ring = file_inode(f)->i_private; in amdgpu_debugfs_mqd_read() local
680 ssize_t bytes = min_t(ssize_t, ring->mqd_size - *pos, size); in amdgpu_debugfs_mqd_read()
681 void *from = ((u8 *)ring->mqd_ptr) + *pos; in amdgpu_debugfs_mqd_read()
683 if (*pos > ring->mqd_size) in amdgpu_debugfs_mqd_read()
701 struct amdgpu_ring *ring = data; in amdgpu_debugfs_ring_error() local
703 amdgpu_fence_driver_set_error(ring, val); in amdgpu_debugfs_ring_error()
713 struct amdgpu_ring *ring) in amdgpu_debugfs_ring_init() argument
720 sprintf(name, "amdgpu_ring_%s", ring->name); in amdgpu_debugfs_ring_init()
722 debugfs_create_file_size(name, S_IFREG | 0444, root, ring, in amdgpu_debugfs_ring_init()
724 ring->ring_size + 12); in amdgpu_debugfs_ring_init()
726 debugfs_create_file_size(name, S_IFREG | 0444, root, ring, in amdgpu_debugfs_ring_init()
728 ring->ring_size + 12); in amdgpu_debugfs_ring_init()
730 if (ring->mqd_obj) { in amdgpu_debugfs_ring_init()
731 sprintf(name, "amdgpu_mqd_%s", ring->name); in amdgpu_debugfs_ring_init()
732 debugfs_create_file_size(name, S_IFREG | 0444, root, ring, in amdgpu_debugfs_ring_init()
734 ring->mqd_size); in amdgpu_debugfs_ring_init()
737 sprintf(name, "amdgpu_error_%s", ring->name); in amdgpu_debugfs_ring_init()
738 debugfs_create_file(name, 0200, root, ring, in amdgpu_debugfs_ring_init()
745 * amdgpu_ring_test_helper - test the ring and set scheduler readiness
747 * @ring: ring to test
749 * Tests the ring and sets the scheduler's readiness status accordingly.
753 int amdgpu_ring_test_helper(struct amdgpu_ring *ring) in amdgpu_ring_test_helper() argument
755 struct amdgpu_device *adev = ring->adev; in amdgpu_ring_test_helper()
758 r = amdgpu_ring_test_ring(ring); in amdgpu_ring_test_helper()
760 DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n", in amdgpu_ring_test_helper()
761 ring->name, r); in amdgpu_ring_test_helper()
763 DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n", in amdgpu_ring_test_helper()
764 ring->name); in amdgpu_ring_test_helper()
766 ring->sched.ready = !r; in amdgpu_ring_test_helper()
771 static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring, in amdgpu_ring_to_mqd_prop() argument
774 struct amdgpu_device *adev = ring->adev; in amdgpu_ring_to_mqd_prop()
775 bool is_high_prio_compute = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE && in amdgpu_ring_to_mqd_prop()
776 amdgpu_gfx_is_high_priority_compute_queue(adev, ring); in amdgpu_ring_to_mqd_prop()
777 bool is_high_prio_gfx = ring->funcs->type == AMDGPU_RING_TYPE_GFX && in amdgpu_ring_to_mqd_prop()
778 amdgpu_gfx_is_high_priority_graphics_queue(adev, ring); in amdgpu_ring_to_mqd_prop()
782 prop->mqd_gpu_addr = ring->mqd_gpu_addr; in amdgpu_ring_to_mqd_prop()
783 prop->hqd_base_gpu_addr = ring->gpu_addr; in amdgpu_ring_to_mqd_prop()
784 prop->rptr_gpu_addr = ring->rptr_gpu_addr; in amdgpu_ring_to_mqd_prop()
785 prop->wptr_gpu_addr = ring->wptr_gpu_addr; in amdgpu_ring_to_mqd_prop()
786 prop->queue_size = ring->ring_size; in amdgpu_ring_to_mqd_prop()
787 prop->eop_gpu_addr = ring->eop_gpu_addr; in amdgpu_ring_to_mqd_prop()
788 prop->use_doorbell = ring->use_doorbell; in amdgpu_ring_to_mqd_prop()
789 prop->doorbell_index = ring->doorbell_index; in amdgpu_ring_to_mqd_prop()
795 prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ; in amdgpu_ring_to_mqd_prop()
804 int amdgpu_ring_init_mqd(struct amdgpu_ring *ring) in amdgpu_ring_init_mqd() argument
806 struct amdgpu_device *adev = ring->adev; in amdgpu_ring_init_mqd()
810 amdgpu_ring_to_mqd_prop(ring, &prop); in amdgpu_ring_init_mqd()
812 ring->wptr = 0; in amdgpu_ring_init_mqd()
814 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) in amdgpu_ring_init_mqd()
817 mqd_mgr = &adev->mqds[ring->funcs->type]; in amdgpu_ring_init_mqd()
819 return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop); in amdgpu_ring_init_mqd()
822 void amdgpu_ring_ib_begin(struct amdgpu_ring *ring) in amdgpu_ring_ib_begin() argument
824 if (ring->is_sw_ring) in amdgpu_ring_ib_begin()
825 amdgpu_sw_ring_ib_begin(ring); in amdgpu_ring_ib_begin()
828 void amdgpu_ring_ib_end(struct amdgpu_ring *ring) in amdgpu_ring_ib_end() argument
830 if (ring->is_sw_ring) in amdgpu_ring_ib_end()
831 amdgpu_sw_ring_ib_end(ring); in amdgpu_ring_ib_end()
834 void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring) in amdgpu_ring_ib_on_emit_cntl() argument
836 if (ring->is_sw_ring) in amdgpu_ring_ib_on_emit_cntl()
837 amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL); in amdgpu_ring_ib_on_emit_cntl()
840 void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring) in amdgpu_ring_ib_on_emit_ce() argument
842 if (ring->is_sw_ring) in amdgpu_ring_ib_on_emit_ce()
843 amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE); in amdgpu_ring_ib_on_emit_ce()
846 void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring) in amdgpu_ring_ib_on_emit_de() argument
848 if (ring->is_sw_ring) in amdgpu_ring_ib_on_emit_de()
849 amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE); in amdgpu_ring_ib_on_emit_de()
852 bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring) in amdgpu_ring_sched_ready() argument
854 if (!ring) in amdgpu_ring_sched_ready()
857 if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched)) in amdgpu_ring_sched_ready()
863 void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring, in amdgpu_ring_reset_helper_begin() argument
866 /* Stop the scheduler to prevent anybody else from touching the ring buffer. */ in amdgpu_ring_reset_helper_begin()
867 drm_sched_wqueue_stop(&ring->sched); in amdgpu_ring_reset_helper_begin()
869 amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence); in amdgpu_ring_reset_helper_begin()
872 int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring, in amdgpu_ring_reset_helper_end() argument
878 /* verify that the ring is functional */ in amdgpu_ring_reset_helper_end()
879 r = amdgpu_ring_test_ring(ring); in amdgpu_ring_reset_helper_end()
887 if (ring->ring_backup_entries_to_copy) { in amdgpu_ring_reset_helper_end()
888 amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy); in amdgpu_ring_reset_helper_end()
889 for (i = 0; i < ring->ring_backup_entries_to_copy; i++) in amdgpu_ring_reset_helper_end()
890 amdgpu_ring_write(ring, ring->ring_backup[i]); in amdgpu_ring_reset_helper_end()
891 amdgpu_ring_commit(ring); in amdgpu_ring_reset_helper_end()
894 drm_sched_wqueue_start(&ring->sched); in amdgpu_ring_reset_helper_end()
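A hedged sketch of how an IP-specific reset callback might wrap the two helpers above; hw_engine_reset() is a placeholder for whatever engine-level reset the IP block performs, not a real function:

/* Illustrative ring-reset flow: stop scheduling and back up unprocessed
 * commands, reset the engine, then re-test the ring and replay the backup. */
static int toy_ring_reset(struct amdgpu_ring *ring,
			  struct amdgpu_fence *guilty_fence)
{
	int r;

	amdgpu_ring_reset_helper_begin(ring, guilty_fence);

	r = hw_engine_reset(ring); /* placeholder for the IP-level reset */
	if (r)
		return r;

	return amdgpu_ring_reset_helper_end(ring, guilty_fence);
}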
898 bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring, in amdgpu_ring_is_reset_type_supported() argument
901 switch (ring->funcs->type) { in amdgpu_ring_is_reset_type_supported()
903 if (ring->adev->gfx.gfx_supported_reset & reset_type) in amdgpu_ring_is_reset_type_supported()
907 if (ring->adev->gfx.compute_supported_reset & reset_type) in amdgpu_ring_is_reset_type_supported()
911 if (ring->adev->sdma.supported_reset & reset_type) in amdgpu_ring_is_reset_type_supported()
916 if (ring->adev->vcn.supported_reset & reset_type) in amdgpu_ring_is_reset_type_supported()
920 if (ring->adev->jpeg.supported_reset & reset_type) in amdgpu_ring_is_reset_type_supported()