Lines Matching +full:4 +full:- +full:ring

34 /* VPE CSA resides in the 4th page of CSA */
40 #define VPE_MAX_DPM_LEVEL 4
85 remainder -= arg2_value; in vpe_u1_8_from_fraction()
87 } while (--i != 0); in vpe_u1_8_from_fraction()
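The two fragments above (lines 85 and 87) are from vpe_u1_8_from_fraction(), which appears to turn a numerator/denominator pair into a U1.8 fixed-point value (1 integer bit, 8 fractional bits) by repeated subtraction. A minimal, self-contained sketch of the same conversion using straightforward rounding; the helper name and rounding choice here are illustrative, not the driver's:

#include <stdint.h>

/* Illustrative only: num/den as U1.8 fixed point (1 integer bit,
 * 8 fractional bits), round-to-nearest. */
static uint16_t u1_8_from_fraction(uint16_t num, uint16_t den)
{
	if (!den)
		return 0;
	return (uint16_t)((((uint32_t)num << 8) + den / 2) / den);
}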
114 * VPE has 4 DPM levels from level 0 (lowest) to 3 (highest),
123 struct amdgpu_device *adev = vpe->ring.adev; in amdgpu_vpe_configure_dpm()
126 if (adev->pm.dpm_enabled) { in amdgpu_vpe_configure_dpm()
135 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); in amdgpu_vpe_configure_dpm()
137 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); in amdgpu_vpe_configure_dpm()
141 dev_dbg(adev->dev, "%s: get clock failed!\n", __func__); in amdgpu_vpe_configure_dpm()
152 for (idx = PP_SMU_NUM_VPECLK_DPM_LEVELS; idx && !vpeclk_enalbled_num; idx--) in amdgpu_vpe_configure_dpm()
153 if (VPEClks[idx-1].Freq) in amdgpu_vpe_configure_dpm()
156 /* vpe dpm only cares about 4 levels. */ in amdgpu_vpe_configure_dpm()
167 if (soc_dpm_level > vpeclk_enalbled_num - 1) in amdgpu_vpe_configure_dpm()
168 soc_dpm_level = vpeclk_enalbled_num - 1; in amdgpu_vpe_configure_dpm()
199 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl); /* PRatio */ in amdgpu_vpe_configure_dpm()
200 … WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000); /* 1ms, unit=1/24MHz */ in amdgpu_vpe_configure_dpm()
201 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000); /* 50ms */ in amdgpu_vpe_configure_dpm()
202 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */ in amdgpu_vpe_configure_dpm()
203 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */ in amdgpu_vpe_configure_dpm()
204 dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__); in amdgpu_vpe_configure_dpm()
206 dev_dbg(adev->dev, "%s: invalid pratio parameters!\n", __func__); in amdgpu_vpe_configure_dpm()
213 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); in amdgpu_vpe_configure_dpm()
215 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); in amdgpu_vpe_configure_dpm()
216 dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__); in amdgpu_vpe_configure_dpm()
217 return -EINVAL; in amdgpu_vpe_configure_dpm()
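Lines 114-217 are from amdgpu_vpe_configure_dpm(): when dynamic power management is enabled, the driver reads the SOC's VPECLK table, counts how many levels are actually enabled, clamps the requested level to that count (VPE itself only tracks VPE_MAX_DPM_LEVEL levels), programs the PRatio and interval/threshold registers, and disables DPM again if the parameters are invalid. A hypothetical, self-contained sketch of just the clamping step; the function and parameter names are illustrative:

#include <stdint.h>

#define VPE_MAX_DPM_LEVEL 4	/* matches the define on line 40 */

/* Illustrative only: clamp a requested level to the highest enabled
 * VPECLK level, never exceeding the four levels VPE exposes. */
static uint32_t clamp_vpe_dpm_level(uint32_t requested, uint32_t enabled_levels)
{
	uint32_t max_level;

	if (!enabled_levels)
		return 0;
	max_level = (enabled_levels < VPE_MAX_DPM_LEVEL ? enabled_levels
							: VPE_MAX_DPM_LEVEL) - 1;
	return requested > max_level ? max_level : requested;
}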
224 .mc_addr = adev->vpe.cmdbuf_gpu_addr, in amdgpu_vpe_psp_update_sram()
228 return psp_execute_ip_fw_load(&adev->psp, &ucode); in amdgpu_vpe_psp_update_sram()
233 struct amdgpu_device *adev = vpe->ring.adev; in amdgpu_vpe_init_microcode()
239 ret = amdgpu_ucode_request(adev, &adev->vpe.fw, AMDGPU_UCODE_REQUIRED, in amdgpu_vpe_init_microcode()
244 vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data; in amdgpu_vpe_init_microcode()
245 adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version); in amdgpu_vpe_init_microcode()
246 adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version); in amdgpu_vpe_init_microcode()
248 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { in amdgpu_vpe_init_microcode()
251 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTX]; in amdgpu_vpe_init_microcode()
252 info->ucode_id = AMDGPU_UCODE_ID_VPE_CTX; in amdgpu_vpe_init_microcode()
253 info->fw = adev->vpe.fw; in amdgpu_vpe_init_microcode()
254 adev->firmware.fw_size += in amdgpu_vpe_init_microcode()
255 ALIGN(le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes), PAGE_SIZE); in amdgpu_vpe_init_microcode()
257 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTL]; in amdgpu_vpe_init_microcode()
258 info->ucode_id = AMDGPU_UCODE_ID_VPE_CTL; in amdgpu_vpe_init_microcode()
259 info->fw = adev->vpe.fw; in amdgpu_vpe_init_microcode()
260 adev->firmware.fw_size += in amdgpu_vpe_init_microcode()
261 ALIGN(le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes), PAGE_SIZE); in amdgpu_vpe_init_microcode()
266 dev_err(adev->dev, "fail to initialize vpe microcode\n"); in amdgpu_vpe_init_microcode()
267 release_firmware(adev->vpe.fw); in amdgpu_vpe_init_microcode()
268 adev->vpe.fw = NULL; in amdgpu_vpe_init_microcode()
275 struct amdgpu_ring *ring = &vpe->ring; in amdgpu_vpe_ring_init() local
278 ring->ring_obj = NULL; in amdgpu_vpe_ring_init()
279 ring->use_doorbell = true; in amdgpu_vpe_ring_init()
280 ring->vm_hub = AMDGPU_MMHUB0(0); in amdgpu_vpe_ring_init()
281 ring->doorbell_index = (adev->doorbell_index.vpe_ring << 1); in amdgpu_vpe_ring_init()
282 snprintf(ring->name, 4, "vpe"); in amdgpu_vpe_ring_init()
284 ret = amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0, in amdgpu_vpe_ring_init()
294 amdgpu_ring_fini(&vpe->ring); in amdgpu_vpe_ring_fini()
301 struct amdgpu_device *adev = ip_block->adev; in vpe_early_init()
302 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_early_init()
311 vpe->collaborate_mode = true; in vpe_early_init()
314 return -EINVAL; in vpe_early_init()
320 dev_info(adev->dev, "VPE: collaborate mode %s", vpe->collaborate_mode ? "true" : "false"); in vpe_early_init()
329 return adev->pm.fw_version < 0x0a640500; in vpe_need_dpm0_at_power_down()
337 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_get_dpm_level()
339 if (!adev->pm.dpm_enabled) in vpe_get_dpm_level()
342 return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv)); in vpe_get_dpm_level()
351 fences += amdgpu_fence_count_emitted(&adev->vpe.ring); in vpe_idle_work_handler()
362 schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); in vpe_idle_work_handler()
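Lines 351 and 362 are from vpe_idle_work_handler(), the delayed worker armed by vpe_ring_end_use() further below: it counts the fences still outstanding on the VPE ring and either gates power or re-arms itself. A short sketch of that decision, assuming the usual amdgpu powergating call; treat the exact call as an assumption, not a quote of the handler:

	/* Illustrative: only gate power when nothing is in flight,
	 * otherwise check again after another VPE_IDLE_TIMEOUT. */
	fences = amdgpu_fence_count_emitted(&adev->vpe.ring);
	if (fences == 0)
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
						       AMD_PG_STATE_GATE);
	else
		schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);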
372 &adev->vpe.cmdbuf_obj, in vpe_common_init()
373 &adev->vpe.cmdbuf_gpu_addr, in vpe_common_init()
374 (void **)&adev->vpe.cmdbuf_cpu_addr); in vpe_common_init()
376 dev_err(adev->dev, "VPE: failed to allocate cmdbuf bo %d\n", r); in vpe_common_init()
380 vpe->context_started = false; in vpe_common_init()
381 INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler); in vpe_common_init()
388 struct amdgpu_device *adev = ip_block->adev; in vpe_sw_init()
389 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_sw_init()
408 adev->vpe.supported_reset = in vpe_sw_init()
409 amdgpu_get_soft_full_reset_mask(&adev->vpe.ring); in vpe_sw_init()
411 adev->vpe.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; in vpe_sw_init()
421 struct amdgpu_device *adev = ip_block->adev; in vpe_sw_fini()
422 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_sw_fini()
424 release_firmware(vpe->fw); in vpe_sw_fini()
425 vpe->fw = NULL; in vpe_sw_fini()
430 amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj, in vpe_sw_fini()
431 &adev->vpe.cmdbuf_gpu_addr, in vpe_sw_fini()
432 (void **)&adev->vpe.cmdbuf_cpu_addr); in vpe_sw_fini()
439 struct amdgpu_device *adev = ip_block->adev; in vpe_hw_init()
440 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_hw_init()
462 struct amdgpu_device *adev = ip_block->adev; in vpe_hw_fini()
463 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_hw_fini()
465 cancel_delayed_work_sync(&adev->vpe.idle_work); in vpe_hw_fini()
485 static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) in vpe_ring_insert_nop() argument
491 amdgpu_ring_write(ring, ring->funcs->nop | in vpe_ring_insert_nop()
492 VPE_CMD_NOP_HEADER_COUNT(count - 1)); in vpe_ring_insert_nop()
494 amdgpu_ring_write(ring, ring->funcs->nop); in vpe_ring_insert_nop()
497 static uint64_t vpe_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t vmid) in vpe_get_csa_mc_addr() argument
499 struct amdgpu_device *adev = ring->adev; in vpe_get_csa_mc_addr()
503 if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp) in vpe_get_csa_mc_addr()
512 static void vpe_ring_emit_pred_exec(struct amdgpu_ring *ring, in vpe_ring_emit_pred_exec() argument
516 if (!ring->adev->vpe.collaborate_mode) in vpe_ring_emit_pred_exec()
519 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_PRED_EXE, 0) | in vpe_ring_emit_pred_exec()
521 amdgpu_ring_write(ring, exec_count & 0x1fff); in vpe_ring_emit_pred_exec()
524 static void vpe_ring_emit_ib(struct amdgpu_ring *ring, in vpe_ring_emit_ib() argument
530 uint64_t csa_mc_addr = vpe_get_csa_mc_addr(ring, vmid); in vpe_ring_emit_ib()
532 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0) | in vpe_ring_emit_ib()
536 amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); in vpe_ring_emit_ib()
537 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); in vpe_ring_emit_ib()
538 amdgpu_ring_write(ring, ib->length_dw); in vpe_ring_emit_ib()
539 amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr)); in vpe_ring_emit_ib()
540 amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr)); in vpe_ring_emit_ib()
543 static void vpe_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr, in vpe_ring_emit_fence() argument
550 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0)); in vpe_ring_emit_fence()
553 amdgpu_ring_write(ring, lower_32_bits(addr)); in vpe_ring_emit_fence()
554 amdgpu_ring_write(ring, upper_32_bits(addr)); in vpe_ring_emit_fence()
555 amdgpu_ring_write(ring, i == 0 ? lower_32_bits(seq) : upper_32_bits(seq)); in vpe_ring_emit_fence()
556 addr += 4; in vpe_ring_emit_fence()
561 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_TRAP, 0)); in vpe_ring_emit_fence()
562 amdgpu_ring_write(ring, 0); in vpe_ring_emit_fence()
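Lines 543-562 are from vpe_ring_emit_fence(): the 64-bit sequence number is written as two 32-bit fence packets, with the destination address advancing by four bytes between them, and a TRAP packet follows when an interrupt is requested. A reconstruction of the two-packet loop, based only on the fragments shown (not necessarily the exact in-tree body):

	for (i = 0; i < 2; i++) {
		amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
		/* 64-bit address of this 32-bit half of the seqno */
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		/* first packet carries the low half, second the high half */
		amdgpu_ring_write(ring, i == 0 ? lower_32_bits(seq) : upper_32_bits(seq));
		addr += 4;
	}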
567 static void vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring) in vpe_ring_emit_pipeline_sync() argument
569 uint32_t seq = ring->fence_drv.sync_seq; in vpe_ring_emit_pipeline_sync()
570 uint64_t addr = ring->fence_drv.gpu_addr; in vpe_ring_emit_pipeline_sync()
572 vpe_ring_emit_pred_exec(ring, 0, 6); in vpe_ring_emit_pipeline_sync()
575 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM, in vpe_ring_emit_pipeline_sync()
579 amdgpu_ring_write(ring, addr & 0xfffffffc); in vpe_ring_emit_pipeline_sync()
580 amdgpu_ring_write(ring, upper_32_bits(addr)); in vpe_ring_emit_pipeline_sync()
581 amdgpu_ring_write(ring, seq); /* reference */ in vpe_ring_emit_pipeline_sync()
582 amdgpu_ring_write(ring, 0xffffffff); /* mask */ in vpe_ring_emit_pipeline_sync()
583 amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | in vpe_ring_emit_pipeline_sync()
584 VPE_CMD_POLL_REGMEM_DW5_INTERVAL(4)); in vpe_ring_emit_pipeline_sync()
587 static void vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) in vpe_ring_emit_wreg() argument
589 vpe_ring_emit_pred_exec(ring, 0, 3); in vpe_ring_emit_wreg()
591 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_REG_WRITE, 0)); in vpe_ring_emit_wreg()
592 amdgpu_ring_write(ring, reg << 2); in vpe_ring_emit_wreg()
593 amdgpu_ring_write(ring, val); in vpe_ring_emit_wreg()
596 static void vpe_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, in vpe_ring_emit_reg_wait() argument
599 vpe_ring_emit_pred_exec(ring, 0, 6); in vpe_ring_emit_reg_wait()
601 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM, in vpe_ring_emit_reg_wait()
605 amdgpu_ring_write(ring, reg << 2); in vpe_ring_emit_reg_wait()
606 amdgpu_ring_write(ring, 0); in vpe_ring_emit_reg_wait()
607 amdgpu_ring_write(ring, val); /* reference */ in vpe_ring_emit_reg_wait()
608 amdgpu_ring_write(ring, mask); /* mask */ in vpe_ring_emit_reg_wait()
609 amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | in vpe_ring_emit_reg_wait()
613 static void vpe_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, in vpe_ring_emit_vm_flush() argument
616 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); in vpe_ring_emit_vm_flush()
619 static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring, in vpe_ring_init_cond_exec() argument
624 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0)); in vpe_ring_init_cond_exec()
625 amdgpu_ring_write(ring, lower_32_bits(addr)); in vpe_ring_init_cond_exec()
626 amdgpu_ring_write(ring, upper_32_bits(addr)); in vpe_ring_init_cond_exec()
627 amdgpu_ring_write(ring, 1); in vpe_ring_init_cond_exec()
628 ret = ring->wptr & ring->buf_mask; in vpe_ring_init_cond_exec()
629 amdgpu_ring_write(ring, 0); in vpe_ring_init_cond_exec()
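vpe_ring_init_cond_exec() (lines 619-629) emits a COND_EXE packet, remembers the ring offset of the final dword it writes as 0, and returns that offset so a later patch step can fill in how many dwords to conditionally execute. A sketch of such a patch step, assuming the convention other amdgpu engines use; the helper name and body are illustrative, not taken from this file:

/* Illustrative only: write the dword count into the placeholder left by
 * init_cond_exec, accounting for ring-buffer wrap-around. */
static void patch_cond_exec(struct amdgpu_ring *ring, unsigned int offset)
{
	unsigned int cur = ring->wptr & ring->buf_mask;

	if (cur > offset)
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = cur + ring->buf_mask + 1 - offset;
}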
634 static int vpe_ring_preempt_ib(struct amdgpu_ring *ring) in vpe_ring_preempt_ib() argument
636 struct amdgpu_device *adev = ring->adev; in vpe_ring_preempt_ib()
637 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_preempt_ib()
638 uint32_t preempt_reg = vpe->regs.queue0_preempt; in vpe_ring_preempt_ib()
642 amdgpu_ring_set_preempt_cond_exec(ring, false); in vpe_ring_preempt_ib()
645 ring->trail_seq += 1; in vpe_ring_preempt_ib()
646 amdgpu_ring_alloc(ring, 10); in vpe_ring_preempt_ib()
647 vpe_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, 0); in vpe_ring_preempt_ib()
648 amdgpu_ring_commit(ring); in vpe_ring_preempt_ib()
651 WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1); in vpe_ring_preempt_ib()
654 for (i = 0; i < adev->usec_timeout; i++) { in vpe_ring_preempt_ib()
655 if (ring->trail_seq == in vpe_ring_preempt_ib()
656 le32_to_cpu(*(ring->trail_fence_cpu_addr))) in vpe_ring_preempt_ib()
661 if (i >= adev->usec_timeout) { in vpe_ring_preempt_ib()
662 r = -EINVAL; in vpe_ring_preempt_ib()
663 dev_err(adev->dev, "ring %d failed to be preempted\n", ring->idx); in vpe_ring_preempt_ib()
667 WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0); in vpe_ring_preempt_ib()
670 amdgpu_ring_set_preempt_cond_exec(ring, true); in vpe_ring_preempt_ib()
684 struct amdgpu_device *adev = ip_block->adev; in vpe_set_powergating_state()
685 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_set_powergating_state()
687 if (!adev->pm.dpm_enabled) in vpe_set_powergating_state()
688 dev_err(adev->dev, "Without PM, cannot support powergating\n"); in vpe_set_powergating_state()
690 dev_dbg(adev->dev, "%s: %s!\n", __func__, (state == AMD_PG_STATE_GATE) ? "GATE":"UNGATE"); in vpe_set_powergating_state()
694 vpe->context_started = false; in vpe_set_powergating_state()
702 static uint64_t vpe_ring_get_rptr(struct amdgpu_ring *ring) in vpe_ring_get_rptr() argument
704 struct amdgpu_device *adev = ring->adev; in vpe_ring_get_rptr()
705 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_get_rptr()
708 if (ring->use_doorbell) { in vpe_ring_get_rptr()
709 rptr = atomic64_read((atomic64_t *)ring->rptr_cpu_addr); in vpe_ring_get_rptr()
710 dev_dbg(adev->dev, "rptr/doorbell before shift == 0x%016llx\n", rptr); in vpe_ring_get_rptr()
712 rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi)); in vpe_ring_get_rptr()
714 rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo)); in vpe_ring_get_rptr()
715 dev_dbg(adev->dev, "rptr before shift [%i] == 0x%016llx\n", ring->me, rptr); in vpe_ring_get_rptr()
721 static uint64_t vpe_ring_get_wptr(struct amdgpu_ring *ring) in vpe_ring_get_wptr() argument
723 struct amdgpu_device *adev = ring->adev; in vpe_ring_get_wptr()
724 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_get_wptr()
727 if (ring->use_doorbell) { in vpe_ring_get_wptr()
728 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); in vpe_ring_get_wptr()
729 dev_dbg(adev->dev, "wptr/doorbell before shift == 0x%016llx\n", wptr); in vpe_ring_get_wptr()
731 wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi)); in vpe_ring_get_wptr()
733 wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo)); in vpe_ring_get_wptr()
734 dev_dbg(adev->dev, "wptr before shift [%i] == 0x%016llx\n", ring->me, wptr); in vpe_ring_get_wptr()
740 static void vpe_ring_set_wptr(struct amdgpu_ring *ring) in vpe_ring_set_wptr() argument
742 struct amdgpu_device *adev = ring->adev; in vpe_ring_set_wptr()
743 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_set_wptr()
745 if (ring->use_doorbell) { in vpe_ring_set_wptr()
746 dev_dbg(adev->dev, "Using doorbell, \ in vpe_ring_set_wptr()
748 lower_32_bits(ring->wptr) << 2 == 0x%08x, \ in vpe_ring_set_wptr()
749 upper_32_bits(ring->wptr) << 2 == 0x%08x\n", in vpe_ring_set_wptr()
750 ring->wptr_offs, in vpe_ring_set_wptr()
751 lower_32_bits(ring->wptr << 2), in vpe_ring_set_wptr()
752 upper_32_bits(ring->wptr << 2)); in vpe_ring_set_wptr()
753 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2); in vpe_ring_set_wptr()
754 WDOORBELL64(ring->doorbell_index, ring->wptr << 2); in vpe_ring_set_wptr()
755 if (vpe->collaborate_mode) in vpe_ring_set_wptr()
756 WDOORBELL64(ring->doorbell_index + 4, ring->wptr << 2); in vpe_ring_set_wptr()
760 for (i = 0; i < vpe->num_instances; i++) { in vpe_ring_set_wptr()
761 dev_dbg(adev->dev, "Not using doorbell, \ in vpe_ring_set_wptr()
764 lower_32_bits(ring->wptr << 2), in vpe_ring_set_wptr()
765 upper_32_bits(ring->wptr << 2)); in vpe_ring_set_wptr()
766 WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo), in vpe_ring_set_wptr()
767 lower_32_bits(ring->wptr << 2)); in vpe_ring_set_wptr()
768 WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi), in vpe_ring_set_wptr()
769 upper_32_bits(ring->wptr << 2)); in vpe_ring_set_wptr()
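In the rptr/wptr handlers above, ring->rptr and ring->wptr count 32-bit dwords, while the doorbell and the QUEUE0_RB_*PTR registers carry a byte offset; hence the << 2 when writing and the matching >> 2 behind the "before shift" debug messages when reading. A trivial sketch of that conversion (variable names are illustrative):

	/* Illustrative only: dword index <-> byte offset used by the doorbell
	 * and the RB pointer registers. */
	wptr_bytes = ring->wptr << 2;	/* dwords -> bytes, as programmed */
	rptr_dwords = rptr_bytes >> 2;	/* bytes -> dwords, as returned */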
774 static int vpe_ring_test_ring(struct amdgpu_ring *ring) in vpe_ring_test_ring() argument
776 struct amdgpu_device *adev = ring->adev; in vpe_ring_test_ring()
784 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret); in vpe_ring_test_ring()
788 adev->wb.wb[index] = 0; in vpe_ring_test_ring()
789 wb_addr = adev->wb.gpu_addr + (index * 4); in vpe_ring_test_ring()
791 ret = amdgpu_ring_alloc(ring, 4); in vpe_ring_test_ring()
793 dev_err(adev->dev, "amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, ret); in vpe_ring_test_ring()
797 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0)); in vpe_ring_test_ring()
798 amdgpu_ring_write(ring, lower_32_bits(wb_addr)); in vpe_ring_test_ring()
799 amdgpu_ring_write(ring, upper_32_bits(wb_addr)); in vpe_ring_test_ring()
800 amdgpu_ring_write(ring, test_pattern); in vpe_ring_test_ring()
801 amdgpu_ring_commit(ring); in vpe_ring_test_ring()
803 for (i = 0; i < adev->usec_timeout; i++) { in vpe_ring_test_ring()
804 if (le32_to_cpu(adev->wb.wb[index]) == test_pattern) in vpe_ring_test_ring()
809 ret = -ETIMEDOUT; in vpe_ring_test_ring()
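vpe_ring_test_ring() (lines 774-809) allocates a write-back slot, emits a fence packet that stores a known test pattern into it, and then polls the slot until the pattern appears or the microsecond timeout expires. A reconstruction of the polling tail implied by the fragments, assuming the usual udelay(1) pacing:

	for (i = 0; i < adev->usec_timeout; i++) {
		if (le32_to_cpu(adev->wb.wb[index]) == test_pattern)
			break;		/* the engine wrote the pattern back */
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		ret = -ETIMEDOUT;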
816 static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout) in vpe_ring_test_ib() argument
818 struct amdgpu_device *adev = ring->adev; in vpe_ring_test_ib()
828 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret); in vpe_ring_test_ib()
832 adev->wb.wb[index] = 0; in vpe_ring_test_ib()
833 wb_addr = adev->wb.gpu_addr + (index * 4); in vpe_ring_test_ib()
843 ib.ptr[4] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0); in vpe_ring_test_ib()
849 ret = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); in vpe_ring_test_ib()
855 ret = ret ? : -ETIMEDOUT; in vpe_ring_test_ib()
859 ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL; in vpe_ring_test_ib()
870 static void vpe_ring_begin_use(struct amdgpu_ring *ring) in vpe_ring_begin_use() argument
872 struct amdgpu_device *adev = ring->adev; in vpe_ring_begin_use()
873 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_begin_use()
875 cancel_delayed_work_sync(&adev->vpe.idle_work); in vpe_ring_begin_use()
878 if (!vpe->context_started) { in vpe_ring_begin_use()
885 context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator)); in vpe_ring_begin_use()
890 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify); in vpe_ring_begin_use()
891 vpe->context_started = true; in vpe_ring_begin_use()
895 static void vpe_ring_end_use(struct amdgpu_ring *ring) in vpe_ring_end_use() argument
897 struct amdgpu_device *adev = ring->adev; in vpe_ring_end_use()
899 schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); in vpe_ring_end_use()
902 static int vpe_ring_reset(struct amdgpu_ring *ring, in vpe_ring_reset() argument
906 struct amdgpu_device *adev = ring->adev; in vpe_ring_reset()
909 amdgpu_ring_reset_helper_begin(ring, timedout_fence); in vpe_ring_reset()
920 return amdgpu_ring_reset_helper_end(ring, timedout_fence); in vpe_ring_reset()
931 return -ENODEV; in amdgpu_get_vpe_reset_mask()
933 return amdgpu_show_reset_mask(buf, adev->vpe.supported_reset); in amdgpu_get_vpe_reset_mask()
943 if (adev->vpe.num_instances) { in amdgpu_vpe_sysfs_reset_mask_init()
944 r = device_create_file(adev->dev, &dev_attr_vpe_reset_mask); in amdgpu_vpe_sysfs_reset_mask_init()
954 if (adev->dev->kobj.sd) { in amdgpu_vpe_sysfs_reset_mask_fini()
955 if (adev->vpe.num_instances) in amdgpu_vpe_sysfs_reset_mask_fini()
956 device_remove_file(adev->dev, &dev_attr_vpe_reset_mask); in amdgpu_vpe_sysfs_reset_mask_fini()
996 adev->vpe.ring.funcs = &vpe_ring_funcs; in vpe_set_ring_funcs()