Lines Matching full:vpe

34 /* VPE CSA resides in the 4th page of CSA */
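The note above pins the VPE context save area to one specific page of the shared CSA. A minimal sketch of that layout, assuming 4 KiB pages and that the comment counts pages from one (so the 4th page starts three pages past the base); csa_base and the helper name are illustrative, not the driver's own:

        /* "4th page" of the CSA, counting from one: offset = 3 pages.
         * Assumes 4 KiB pages; csa_base is an illustrative parameter.
         */
        #define VPE_CSA_PAGE_INDEX 3

        static inline uint64_t vpe_csa_addr_sketch(uint64_t csa_base)
        {
                return csa_base + VPE_CSA_PAGE_INDEX * 0x1000ULL;
        }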
114 * VPE has 4 DPM levels from level 0 (lowest) to 3 (highest),
115 * VPE FW will dynamically decide which level should be used according to the current load.
117 * Get the VPE and SOC clocks from PM, and select the four appropriate clock values,
119 * The VPE FW can then request the appropriate frequency from the PMFW.
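The comment block above describes the selection step only in prose. A hypothetical sketch of picking four DPM levels from a clock table that PM reports sorted low to high (per the "Enabled VPE clocks are ordered from low to high" note further down), clamping to the highest enabled clock when fewer than four exist; all names here are illustrative, not the driver's own:

        #define VPE_MAX_DPM_LEVEL 4

        /* clk_table: enabled VPE clocks, sorted low to high */
        static void vpe_pick_dpm_levels(const uint32_t *clk_table, int num_enabled,
                                        uint32_t levels[VPE_MAX_DPM_LEVEL])
        {
                int i;

                if (num_enabled <= 0)
                        return;

                for (i = 0; i < VPE_MAX_DPM_LEVEL; i++) {
                        /* clamp to the highest enabled clock if fewer than four exist */
                        int idx = (i < num_enabled) ? i : num_enabled - 1;

                        levels[i] = clk_table[idx];
                }
        }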
121 int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe) in amdgpu_vpe_configure_dpm() argument
123 struct amdgpu_device *adev = vpe->ring.adev; in amdgpu_vpe_configure_dpm()
135 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); in amdgpu_vpe_configure_dpm()
137 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); in amdgpu_vpe_configure_dpm()
148 /* Confirm the number of enabled VPE clocks in amdgpu_vpe_configure_dpm()
149 * Enabled VPE clocks are ordered from low to high in VPEClks in amdgpu_vpe_configure_dpm()
156 /* VPE DPM only cares about 4 levels. */ in amdgpu_vpe_configure_dpm()
199 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl); /* PRatio */ in amdgpu_vpe_configure_dpm()
200 … WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000); /* 1ms, unit=1/24MHz */ in amdgpu_vpe_configure_dpm()
201 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000); /* 50ms */ in amdgpu_vpe_configure_dpm()
202 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */ in amdgpu_vpe_configure_dpm()
203 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */ in amdgpu_vpe_configure_dpm()
204 dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__); in amdgpu_vpe_configure_dpm()
213 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); in amdgpu_vpe_configure_dpm()
215 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); in amdgpu_vpe_configure_dpm()
216 dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__); in amdgpu_vpe_configure_dpm()
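The magic numbers written above all derive from the 24 MHz reference clock named in the inline comments (24000 ticks = 1 ms, 1200000 ticks = 50 ms). A small helper makes the conversion explicit; the helper itself is illustrative:

        #define VPE_DPM_TICKS_PER_MS 24000 /* reference clock: 24 MHz */

        static inline uint32_t vpe_ms_to_ticks(uint32_t ms)
        {
                return ms * VPE_DPM_TICKS_PER_MS; /* 1 ms -> 24000, 50 ms -> 1200000 */
        }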
224 .mc_addr = adev->vpe.cmdbuf_gpu_addr, in amdgpu_vpe_psp_update_sram()
231 int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe) in amdgpu_vpe_init_microcode() argument
233 struct amdgpu_device *adev = vpe->ring.adev; in amdgpu_vpe_init_microcode()
239 ret = amdgpu_ucode_request(adev, &adev->vpe.fw, "amdgpu/%s.bin", fw_prefix); in amdgpu_vpe_init_microcode()
243 vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data; in amdgpu_vpe_init_microcode()
244 adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version); in amdgpu_vpe_init_microcode()
245 adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version); in amdgpu_vpe_init_microcode()
252 info->fw = adev->vpe.fw; in amdgpu_vpe_init_microcode()
258 info->fw = adev->vpe.fw; in amdgpu_vpe_init_microcode()
265 dev_err(adev->dev, "failed to initialize vpe microcode\n"); in amdgpu_vpe_init_microcode()
266 release_firmware(adev->vpe.fw); in amdgpu_vpe_init_microcode()
267 adev->vpe.fw = NULL; in amdgpu_vpe_init_microcode()
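Condensed from the matches above, the request/parse/cleanup shape of amdgpu_vpe_init_microcode looks roughly like this; the intermediate steps the search elided (header validation, ucode-info setup) are omitted, and the out label is illustrative:

        static int vpe_init_microcode_sketch(struct amdgpu_device *adev,
                                             const char *fw_prefix)
        {
                const struct vpe_firmware_header_v1_0 *vpe_hdr;
                int ret;

                ret = amdgpu_ucode_request(adev, &adev->vpe.fw, "amdgpu/%s.bin", fw_prefix);
                if (ret)
                        goto out;

                /* version fields are little-endian in the firmware image */
                vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
                adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version);
                adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version);

                return 0;
        out:
                dev_err(adev->dev, "failed to initialize vpe microcode\n");
                release_firmware(adev->vpe.fw);
                adev->vpe.fw = NULL;
                return ret;
        }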
271 int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe) in amdgpu_vpe_ring_init() argument
273 struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe); in amdgpu_vpe_ring_init()
274 struct amdgpu_ring *ring = &vpe->ring; in amdgpu_vpe_ring_init()
281 snprintf(ring->name, 4, "vpe"); in amdgpu_vpe_ring_init()
283 ret = amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0, in amdgpu_vpe_ring_init()
291 int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe) in amdgpu_vpe_ring_fini() argument
293 amdgpu_ring_fini(&vpe->ring); in amdgpu_vpe_ring_fini()
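The ring bring-up above fits the common amdgpu pattern; a sketch with the arguments the search did not show filled in as assumptions (default hardware priority, no scheduler score):

        static int vpe_ring_init_sketch(struct amdgpu_vpe *vpe)
        {
                struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
                struct amdgpu_ring *ring = &vpe->ring;

                snprintf(ring->name, 4, "vpe"); /* "vpe" + NUL exactly fills size 4 */

                /* 1024-dword ring fed by the VPE trap interrupt source */
                return amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0,
                                        AMDGPU_RING_PRIO_DEFAULT, NULL);
        }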
301 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_early_init() local
306 vpe_v6_1_set_funcs(vpe); in vpe_early_init()
309 vpe_v6_1_set_funcs(vpe); in vpe_early_init()
310 vpe->collaborate_mode = true; in vpe_early_init()
317 vpe_set_regs(vpe); in vpe_early_init()
319 dev_info(adev->dev, "VPE: collaborate mode %s\n", vpe->collaborate_mode ? "true" : "false"); in vpe_early_init()
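Read together, the matches above imply a per-IP-version dispatch: both listed paths install the v6.1 functions, and one revision additionally enables collaborate mode. A sketch, with the version literals as assumptions:

        static int vpe_early_init_sketch(struct amdgpu_device *adev)
        {
                struct amdgpu_vpe *vpe = &adev->vpe;

                switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
                case IP_VERSION(6, 1, 0):
                        vpe_v6_1_set_funcs(vpe);
                        break;
                case IP_VERSION(6, 1, 1): /* assumed: the dual-instance revision */
                        vpe_v6_1_set_funcs(vpe);
                        vpe->collaborate_mode = true;
                        break;
                default:
                        return -EINVAL;
                }

                vpe_set_regs(vpe);
                dev_info(adev->dev, "VPE: collaborate mode %s\n",
                         vpe->collaborate_mode ? "true" : "false");

                return 0;
        }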
327 container_of(work, struct amdgpu_device, vpe.idle_work.work); in vpe_idle_work_handler()
330 fences += amdgpu_fence_count_emitted(&adev->vpe.ring); in vpe_idle_work_handler()
335 schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); in vpe_idle_work_handler()
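The idle handler counts outstanding fences and either gates power or re-arms itself. A sketch of that whole shape; the powergating call is an assumption based on the usual amdgpu IP-block idiom:

        static void vpe_idle_work_sketch(struct work_struct *work)
        {
                struct amdgpu_device *adev =
                        container_of(work, struct amdgpu_device, vpe.idle_work.work);
                unsigned int fences = 0;

                fences += amdgpu_fence_count_emitted(&adev->vpe.ring);

                if (fences == 0)
                        /* nothing in flight: gate the block (assumed call) */
                        amdgpu_device_ip_set_powergating_state(adev,
                                                               AMD_IP_BLOCK_TYPE_VPE,
                                                               AMD_PG_STATE_GATE);
                else
                        /* still busy: check again after another timeout */
                        schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
        }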
338 static int vpe_common_init(struct amdgpu_vpe *vpe) in vpe_common_init() argument
340 struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe); in vpe_common_init()
345 &adev->vpe.cmdbuf_obj, in vpe_common_init()
346 &adev->vpe.cmdbuf_gpu_addr, in vpe_common_init()
347 (void **)&adev->vpe.cmdbuf_cpu_addr); in vpe_common_init()
349 dev_err(adev->dev, "VPE: failed to allocate cmdbuf bo %d\n", r); in vpe_common_init()
353 vpe->context_started = false; in vpe_common_init()
354 INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler); in vpe_common_init()
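The allocation above only shows the output arguments; a sketch with the unseen size, alignment, and domain filled in as assumptions (one GTT page is typical for a small command buffer):

        static int vpe_alloc_cmdbuf_sketch(struct amdgpu_device *adev)
        {
                int r;

                r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                            AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->vpe.cmdbuf_obj,
                                            &adev->vpe.cmdbuf_gpu_addr,
                                            (void **)&adev->vpe.cmdbuf_cpu_addr);
                if (r)
                        dev_err(adev->dev, "VPE: failed to allocate cmdbuf bo %d\n", r);

                return r;
        }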
362 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_sw_init() local
365 ret = vpe_common_init(vpe); in vpe_sw_init()
369 ret = vpe_irq_init(vpe); in vpe_sw_init()
373 ret = vpe_ring_init(vpe); in vpe_sw_init()
377 ret = vpe_init_microcode(vpe); in vpe_sw_init()
387 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_sw_fini() local
389 release_firmware(vpe->fw); in vpe_sw_fini()
390 vpe->fw = NULL; in vpe_sw_fini()
392 vpe_ring_fini(vpe); in vpe_sw_fini()
394 amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj, in vpe_sw_fini()
395 &adev->vpe.cmdbuf_gpu_addr, in vpe_sw_fini()
396 (void **)&adev->vpe.cmdbuf_cpu_addr); in vpe_sw_fini()
404 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_hw_init() local
407 /* Power on VPE */ in vpe_hw_init()
413 ret = vpe_load_microcode(vpe); in vpe_hw_init()
417 ret = vpe_ring_start(vpe); in vpe_hw_init()
427 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_hw_fini() local
429 vpe_ring_stop(vpe); in vpe_hw_fini()
431 /* Power off VPE */ in vpe_hw_fini()
441 cancel_delayed_work_sync(&adev->vpe.idle_work); in vpe_suspend()
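Suspend cancels the idle worker before the hardware goes down, so the delayed work cannot race with power-off. A sketch of that ordering; the handle-based signature and the reuse of the hw_fini path are assumptions:

        static int vpe_suspend_sketch(void *handle)
        {
                struct amdgpu_device *adev = (struct amdgpu_device *)handle;

                /* stop the idle worker first so it cannot re-arm mid-teardown */
                cancel_delayed_work_sync(&adev->vpe.idle_work);

                return vpe_hw_fini(handle);
        }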
484 if (!ring->adev->vpe.collaborate_mode) in vpe_ring_emit_pred_exec()
605 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_preempt_ib() local
606 uint32_t preempt_reg = vpe->regs.queue0_preempt; in vpe_ring_preempt_ib()
619 WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1); in vpe_ring_preempt_ib()
635 WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0); in vpe_ring_preempt_ib()
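The two writes above bracket the preemption handshake: assert the per-queue preempt register, wait for the ring to quiesce, then deassert. A sketch; the trailing-fence poll in the middle is an assumption modeled on other amdgpu rings:

        static int vpe_preempt_sketch(struct amdgpu_ring *ring)
        {
                struct amdgpu_device *adev = ring->adev;
                struct amdgpu_vpe *vpe = &adev->vpe;
                uint32_t preempt_reg = vpe->regs.queue0_preempt;
                int i, r = 0;

                /* assert preemption */
                WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1);

                /* assumed completion check: poll the trailing fence */
                for (i = 0; i < adev->usec_timeout; i++) {
                        if (ring->trail_seq == le32_to_cpu(*ring->trail_fence_cpu_addr))
                                break;
                        udelay(1);
                }
                if (i >= adev->usec_timeout)
                        r = -ETIMEDOUT;

                /* deassert preemption */
                WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0);

                return r;
        }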
653 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_set_powergating_state() local
662 vpe->context_started = false; in vpe_set_powergating_state()
673 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_get_rptr() local
680 rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi)); in vpe_ring_get_rptr()
682 rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo)); in vpe_ring_get_rptr()
692 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_get_wptr() local
699 wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi)); in vpe_ring_get_wptr()
701 wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo)); in vpe_ring_get_wptr()
711 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_set_wptr() local
723 if (vpe->collaborate_mode) in vpe_ring_set_wptr()
728 for (i = 0; i < vpe->num_instances; i++) { in vpe_ring_set_wptr()
734 WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo), in vpe_ring_set_wptr()
736 WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi), in vpe_ring_set_wptr()
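The rptr/wptr accessors above split each 64-bit ring pointer across a hi/lo register pair, and in collaborate mode the write is broadcast to every instance. A sketch of the write side; the <<2 dword-to-byte shift is an assumption from the usual amdgpu ring convention:

        static void vpe_broadcast_wptr_sketch(struct amdgpu_ring *ring)
        {
                struct amdgpu_vpe *vpe = &ring->adev->vpe;
                uint64_t wptr = ring->wptr << 2; /* assumed: dwords to bytes */
                int i;

                for (i = 0; i < vpe->num_instances; i++) {
                        WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo),
                               lower_32_bits(wptr));
                        WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi),
                               upper_32_bits(wptr));
                }
        }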
841 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_begin_use() local
843 cancel_delayed_work_sync(&adev->vpe.idle_work); in vpe_ring_begin_use()
845 /* Power on VPE and notify VPE of new context */ in vpe_ring_begin_use()
846 if (!vpe->context_started) { in vpe_ring_begin_use()
849 /* Power on VPE */ in vpe_ring_begin_use()
853 context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator)); in vpe_ring_begin_use()
858 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify); in vpe_ring_begin_use()
859 vpe->context_started = true; in vpe_ring_begin_use()
867 schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); in vpe_ring_end_use()
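begin_use cancels the idle worker and, on the first submission after power-up, toggles the context-indicator register so the firmware sees that a new context has started; end_use re-arms the worker. A sketch of the notification step; the toggled bit is an assumption:

        static void vpe_notify_context_sketch(struct amdgpu_vpe *vpe)
        {
                uint32_t context_notify;

                context_notify = RREG32(vpe_get_reg_offset(vpe, 0,
                                        vpe->regs.context_indicator));
                context_notify ^= 0x1; /* assumed: bit 0 is the indicator */
                WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator),
                       context_notify);
                vpe->context_started = true;
        }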
905 adev->vpe.ring.funcs = &vpe_ring_funcs; in vpe_set_ring_funcs()