Lines Matching refs:vpe
121 int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe) in amdgpu_vpe_configure_dpm() argument
123 struct amdgpu_device *adev = vpe->ring.adev; in amdgpu_vpe_configure_dpm()
135 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); in amdgpu_vpe_configure_dpm()
137 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); in amdgpu_vpe_configure_dpm()
199 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl); /* PRatio */ in amdgpu_vpe_configure_dpm()
200 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000); /* 1ms, unit=1/24MHz */ in amdgpu_vpe_configure_dpm()
201 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000); /* 50ms */ in amdgpu_vpe_configure_dpm()
202 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */ in amdgpu_vpe_configure_dpm()
203 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */ in amdgpu_vpe_configure_dpm()
213 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); in amdgpu_vpe_configure_dpm()
215 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); in amdgpu_vpe_configure_dpm()
224 .mc_addr = adev->vpe.cmdbuf_gpu_addr, in amdgpu_vpe_psp_update_sram()
231 int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe) in amdgpu_vpe_init_microcode() argument
233 struct amdgpu_device *adev = vpe->ring.adev; in amdgpu_vpe_init_microcode()
239 ret = amdgpu_ucode_request(adev, &adev->vpe.fw, AMDGPU_UCODE_REQUIRED, in amdgpu_vpe_init_microcode()
244 vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data; in amdgpu_vpe_init_microcode()
245 adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version); in amdgpu_vpe_init_microcode()
246 adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version); in amdgpu_vpe_init_microcode()
253 info->fw = adev->vpe.fw; in amdgpu_vpe_init_microcode()
259 info->fw = adev->vpe.fw; in amdgpu_vpe_init_microcode()
267 release_firmware(adev->vpe.fw); in amdgpu_vpe_init_microcode()
268 adev->vpe.fw = NULL; in amdgpu_vpe_init_microcode()
272 int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe) in amdgpu_vpe_ring_init() argument
274 struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe); in amdgpu_vpe_ring_init()
275 struct amdgpu_ring *ring = &vpe->ring; in amdgpu_vpe_ring_init()
284 ret = amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0, in amdgpu_vpe_ring_init()
292 int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe) in amdgpu_vpe_ring_fini() argument
294 amdgpu_ring_fini(&vpe->ring); in amdgpu_vpe_ring_fini()
302 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_early_init() local
307 vpe_v6_1_set_funcs(vpe); in vpe_early_init()
310 vpe_v6_1_set_funcs(vpe); in vpe_early_init()
311 vpe->collaborate_mode = true; in vpe_early_init()
318 vpe_set_regs(vpe); in vpe_early_init()
320 dev_info(adev->dev, "VPE: collaborate mode %s", vpe->collaborate_mode ? "true" : "false"); in vpe_early_init()
337 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_get_dpm_level() local
342 return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv)); in vpe_get_dpm_level()
348 container_of(work, struct amdgpu_device, vpe.idle_work.work); in vpe_idle_work_handler()
351 fences += amdgpu_fence_count_emitted(&adev->vpe.ring); in vpe_idle_work_handler()
362 schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); in vpe_idle_work_handler()
365 static int vpe_common_init(struct amdgpu_vpe *vpe) in vpe_common_init() argument
367 struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe); in vpe_common_init()
372 &adev->vpe.cmdbuf_obj, in vpe_common_init()
373 &adev->vpe.cmdbuf_gpu_addr, in vpe_common_init()
374 (void **)&adev->vpe.cmdbuf_cpu_addr); in vpe_common_init()
380 vpe->context_started = false; in vpe_common_init()
381 INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler); in vpe_common_init()
389 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_sw_init() local
392 ret = vpe_common_init(vpe); in vpe_sw_init()
396 ret = vpe_irq_init(vpe); in vpe_sw_init()
400 ret = vpe_ring_init(vpe); in vpe_sw_init()
404 ret = vpe_init_microcode(vpe); in vpe_sw_init()
408 adev->vpe.supported_reset = in vpe_sw_init()
409 amdgpu_get_soft_full_reset_mask(&adev->vpe.ring); in vpe_sw_init()
411 adev->vpe.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; in vpe_sw_init()
422 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_sw_fini() local
424 release_firmware(vpe->fw); in vpe_sw_fini()
425 vpe->fw = NULL; in vpe_sw_fini()
428 vpe_ring_fini(vpe); in vpe_sw_fini()
430 amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj, in vpe_sw_fini()
431 &adev->vpe.cmdbuf_gpu_addr, in vpe_sw_fini()
432 (void **)&adev->vpe.cmdbuf_cpu_addr); in vpe_sw_fini()
440 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_hw_init() local
449 ret = vpe_load_microcode(vpe); in vpe_hw_init()
453 ret = vpe_ring_start(vpe); in vpe_hw_init()
463 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_hw_fini() local
465 cancel_delayed_work_sync(&adev->vpe.idle_work); in vpe_hw_fini()
467 vpe_ring_stop(vpe); in vpe_hw_fini()
516 if (!ring->adev->vpe.collaborate_mode) in vpe_ring_emit_pred_exec()
637 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_preempt_ib() local
638 uint32_t preempt_reg = vpe->regs.queue0_preempt; in vpe_ring_preempt_ib()
651 WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1); in vpe_ring_preempt_ib()
667 WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0); in vpe_ring_preempt_ib()
685 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_set_powergating_state() local
694 vpe->context_started = false; in vpe_set_powergating_state()
705 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_get_rptr() local
712 rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi)); in vpe_ring_get_rptr()
714 rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo)); in vpe_ring_get_rptr()
724 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_get_wptr() local
731 wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi)); in vpe_ring_get_wptr()
733 wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo)); in vpe_ring_get_wptr()
743 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_set_wptr() local
755 if (vpe->collaborate_mode) in vpe_ring_set_wptr()
760 for (i = 0; i < vpe->num_instances; i++) { in vpe_ring_set_wptr()
766 WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo), in vpe_ring_set_wptr()
768 WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi), in vpe_ring_set_wptr()
873 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_begin_use() local
875 cancel_delayed_work_sync(&adev->vpe.idle_work); in vpe_ring_begin_use()
878 if (!vpe->context_started) { in vpe_ring_begin_use()
885 context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator)); in vpe_ring_begin_use()
890 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify); in vpe_ring_begin_use()
891 vpe->context_started = true; in vpe_ring_begin_use()
899 schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); in vpe_ring_end_use()
933 return amdgpu_show_reset_mask(buf, adev->vpe.supported_reset); in amdgpu_get_vpe_reset_mask()
943 if (adev->vpe.num_instances) { in amdgpu_vpe_sysfs_reset_mask_init()
955 if (adev->vpe.num_instances) in amdgpu_vpe_sysfs_reset_mask_fini()
996 adev->vpe.ring.funcs = &vpe_ring_funcs; in vpe_set_ring_funcs()
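
The timing registers programmed in amdgpu_vpe_configure_dpm() above (lines 200-203) take values in 24 MHz reference-clock ticks, per the "unit=1/24MHz" comment. A minimal standalone sketch of that conversion follows; VPE_DPM_REF_CLK_HZ and vpe_ticks_to_ms are hypothetical names introduced here for illustration and are not part of the driver.

	#include <stdio.h>

	/* 24 MHz DPM reference clock, per the "unit=1/24MHz" register comment. */
	#define VPE_DPM_REF_CLK_HZ 24000000u

	/* Convert a tick count written to the DPM timing registers into ms. */
	static unsigned int vpe_ticks_to_ms(unsigned int ticks)
	{
		return ticks / (VPE_DPM_REF_CLK_HZ / 1000u);
	}

	int main(void)
	{
		printf("request_interval:   %u ms\n", vpe_ticks_to_ms(24000));   /* 1 ms  */
		printf("decision_threshold: %u ms\n", vpe_ticks_to_ms(1200000)); /* 50 ms */
		return 0;
	}

This reproduces the millisecond values named in the source comments: 24000 ticks is 1 ms and 1200000 ticks is 50 ms.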