1aaa36a97SAlex Deucher /*
2aaa36a97SAlex Deucher * Copyright 2014 Advanced Micro Devices, Inc.
3aaa36a97SAlex Deucher * All Rights Reserved.
4aaa36a97SAlex Deucher *
5aaa36a97SAlex Deucher * Permission is hereby granted, free of charge, to any person obtaining a
6aaa36a97SAlex Deucher * copy of this software and associated documentation files (the
7aaa36a97SAlex Deucher * "Software"), to deal in the Software without restriction, including
8aaa36a97SAlex Deucher * without limitation the rights to use, copy, modify, merge, publish,
9aaa36a97SAlex Deucher * distribute, sub license, and/or sell copies of the Software, and to
10aaa36a97SAlex Deucher * permit persons to whom the Software is furnished to do so, subject to
11aaa36a97SAlex Deucher * the following conditions:
12aaa36a97SAlex Deucher *
13aaa36a97SAlex Deucher * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14aaa36a97SAlex Deucher * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15aaa36a97SAlex Deucher * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16aaa36a97SAlex Deucher * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17aaa36a97SAlex Deucher * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18aaa36a97SAlex Deucher * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19aaa36a97SAlex Deucher * USE OR OTHER DEALINGS IN THE SOFTWARE.
20aaa36a97SAlex Deucher *
21aaa36a97SAlex Deucher * The above copyright notice and this permission notice (including the
22aaa36a97SAlex Deucher * next paragraph) shall be included in all copies or substantial portions
23aaa36a97SAlex Deucher * of the Software.
24aaa36a97SAlex Deucher *
25aaa36a97SAlex Deucher * Authors: Christian König <christian.koenig@amd.com>
26aaa36a97SAlex Deucher */
27aaa36a97SAlex Deucher
28aaa36a97SAlex Deucher #include <linux/firmware.h>
2947b757fbSSam Ravnborg
30aaa36a97SAlex Deucher #include "amdgpu.h"
31aaa36a97SAlex Deucher #include "amdgpu_vce.h"
32aaa36a97SAlex Deucher #include "vid.h"
33aaa36a97SAlex Deucher #include "vce/vce_3_0_d.h"
34aaa36a97SAlex Deucher #include "vce/vce_3_0_sh_mask.h"
35be4f38e2SAlex Deucher #include "oss/oss_3_0_d.h"
36be4f38e2SAlex Deucher #include "oss/oss_3_0_sh_mask.h"
375bbc553aSLeo Liu #include "gca/gfx_8_0_d.h"
386a585777SAlex Deucher #include "smu/smu_7_1_2_d.h"
396a585777SAlex Deucher #include "smu/smu_7_1_2_sh_mask.h"
40115933a5SChunming Zhou #include "gca/gfx_8_0_sh_mask.h"
41091aec0bSAndrey Grodzovsky #include "ivsrcid/ivsrcid_vislands30.h"
42115933a5SChunming Zhou
435bbc553aSLeo Liu
445bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
455bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
4650a1ebc7SRex Zhu #define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07
4750a1ebc7SRex Zhu
483c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
493c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
503c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
5150a1ebc7SRex Zhu #define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
5250a1ebc7SRex Zhu
53567e6e29Sjimqu #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
54aaa36a97SAlex Deucher
55e9822622SLeo Liu #define VCE_V3_0_FW_SIZE (384 * 1024)
56e9822622SLeo Liu #define VCE_V3_0_STACK_SIZE (64 * 1024)
57e9822622SLeo Liu #define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
58e9822622SLeo Liu
59ef6239e0SAlex Deucher #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
60ef6239e0SAlex Deucher
6150a1ebc7SRex Zhu #define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
6250a1ebc7SRex Zhu | GRBM_GFX_INDEX__VCE_ALL_PIPE)
6350a1ebc7SRex Zhu
645bbc553aSLeo Liu static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
65aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
66aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
67567e6e29Sjimqu static int vce_v3_0_wait_for_idle(void *handle);
6826679899SRex Zhu static int vce_v3_0_set_clockgating_state(void *handle,
6926679899SRex Zhu enum amd_clockgating_state state);
/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer of the ring.  The VCE
 * register aperture is instance-selected through GRBM_GFX_INDEX, so the
 * whole read is done under grbm_idx_mutex.
 */
static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 v;

	mutex_lock(&adev->grbm_idx_mutex);
	/* Select the instance that is actually present: instance 0 when
	 * none or only VCE1 is harvested, instance 1 when VCE0 is
	 * harvested.  (Both-harvested parts are rejected in early_init.)
	 */
	if (adev->vce.harvest_config == 0 ||
	    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	/* ring->me picks one of the three rings multiplexed on the engine */
	if (ring->me == 0)
		v = RREG32(mmVCE_RB_RPTR);
	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_RPTR2);
	else
		v = RREG32(mmVCE_RB_RPTR3);

	/* restore the default GRBM selection before dropping the lock */
	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return v;
}
101aaa36a97SAlex Deucher
/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer of the ring.  Mirrors
 * vce_v3_0_ring_get_rptr: instance selection via GRBM_GFX_INDEX under
 * grbm_idx_mutex, ring selection via ring->me.
 */
static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 v;

	mutex_lock(&adev->grbm_idx_mutex);
	/* route register accesses to the surviving VCE instance */
	if (adev->vce.harvest_config == 0 ||
	    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	/* one WPTR register per ring */
	if (ring->me == 0)
		v = RREG32(mmVCE_RB_WPTR);
	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_WPTR2);
	else
		v = RREG32(mmVCE_RB_WPTR3);

	/* restore the default GRBM selection before dropping the lock */
	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return v;
}
133aaa36a97SAlex Deucher
/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the (software) write pointer to the hardware WPTR register of
 * the ring selected by ring->me, with the same GRBM_GFX_INDEX instance
 * routing as the get_rptr/get_wptr callbacks.
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	mutex_lock(&adev->grbm_idx_mutex);
	/* route register accesses to the surviving VCE instance */
	if (adev->vce.harvest_config == 0 ||
	    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	/* only the low 32 bits of the wptr are programmed into the HW */
	if (ring->me == 0)
		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	else if (ring->me == 1)
		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
	else
		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));

	/* restore the default GRBM selection before dropping the lock */
	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);
}
162aaa36a97SAlex Deucher
/* Assert or release the CGTT override bit in VCE_RB_ARB_CTRL, which
 * forces VCE clock gating off while set.
 */
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	u32 field_val = override ? 1 : 0;

	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, field_val);
}
1670689a570SEric Huang
/**
 * vce_v3_0_set_vce_sw_clock_gating - configure VCE clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: true to leave the clocks in the gated state, false to force
 *         them running
 *
 * NOTE(review): the literal masks below are opaque magic values; their
 * per-bit meaning is not defined anywhere in this file and would need
 * to be confirmed against the VCE 3.0 register spec.
 */
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating while we reprogram it */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	   With the clocks in the gated state the core is still
	   accessible but the firmware will throttle the clocks on the
	   fly as necessary.
	*/
	if (!gated) {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		/* force the DMA clocks on while ungated */
		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0x3ff;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		/* release the DMA clock force-on bits when gated */
		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	/* drop the override so the gating configuration takes effect */
	vce_v3_0_override_vce_clock_gating(adev, false);
}
2340689a570SEric Huang
/**
 * vce_v3_0_firmware_loaded - poll for the VCE firmware boot handshake
 *
 * @adev: amdgpu_device pointer
 *
 * Polls VCE_STATUS for the VCPU_REPORT_FW_LOADED bit.  Each of the 10
 * outer attempts polls up to 100 times with a 10 ms delay; between
 * attempts the ECPU is pulsed through soft reset to retry the boot.
 *
 * Returns 0 once the firmware reports itself loaded, -ETIMEDOUT if it
 * never does.
 */
static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		/* firmware did not come up: pulse the ECPU soft reset */
		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(10);
	}

	return -ETIMEDOUT;
}
257567e6e29Sjimqu
/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block: for every non-harvested instance,
 * program the three ring buffers, restore the MC/firmware layout,
 * release the ECPU from reset and wait for the firmware handshake.
 *
 * Returns 0 on success, or the error from vce_v3_0_firmware_loaded().
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		/* Program instance 0 reg space for two instances or instance 0 case
		   program instance 1 reg space for only instance 1 available case */
		if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
			ring = &adev->vce.ring[0];
			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

			ring = &adev->vce.ring[1];
			WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

			ring = &adev->vce.ring[2];
			WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
		}

		vce_v3_0_mc_resume(adev, idx);
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

		/* Stoney and newer drive the clock/VCPU enable via a raw mask */
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);

		/* release the ECPU from reset so the firmware can boot */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
3305bbc553aSLeo Liu
/**
 * vce_v3_0_stop - stop the VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Halts every non-harvested VCE instance: disables the VCPU clock,
 * holds the ECPU in soft reset and clears VCE_STATUS.  Always
 * returns 0.
 */
static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		/* Stoney and newer drive the clock/VCPU enable via a raw mask */
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear VCE STATUS */
		WREG32(mmVCE_STATUS, 0);
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
359aaa36a97SAlex Deucher
3606a585777SAlex Deucher #define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074
3616a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__SHIFT 27
3626a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000
3636a585777SAlex Deucher
vce_v3_0_get_harvest_config(struct amdgpu_device * adev)3646a585777SAlex Deucher static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
3656a585777SAlex Deucher {
3666a585777SAlex Deucher u32 tmp;
3676a585777SAlex Deucher
368cfaba566SSamuel Li if ((adev->asic_type == CHIP_FIJI) ||
36932bec2afSLeo Liu (adev->asic_type == CHIP_STONEY))
3701dab5f06STom St Denis return AMDGPU_VCE_HARVEST_VCE1;
371188a9bcdSAlex Deucher
3722f7d10b3SJammy Zhou if (adev->flags & AMD_IS_APU)
3736a585777SAlex Deucher tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
3746a585777SAlex Deucher VCE_HARVEST_FUSE_MACRO__MASK) >>
3756a585777SAlex Deucher VCE_HARVEST_FUSE_MACRO__SHIFT;
3766a585777SAlex Deucher else
3776a585777SAlex Deucher tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
3786a585777SAlex Deucher CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
3796a585777SAlex Deucher CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
3806a585777SAlex Deucher
3816a585777SAlex Deucher switch (tmp) {
3826a585777SAlex Deucher case 1:
3831dab5f06STom St Denis return AMDGPU_VCE_HARVEST_VCE0;
3846a585777SAlex Deucher case 2:
3851dab5f06STom St Denis return AMDGPU_VCE_HARVEST_VCE1;
3866a585777SAlex Deucher case 3:
3871dab5f06STom St Denis return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
3886a585777SAlex Deucher default:
38932bec2afSLeo Liu if ((adev->asic_type == CHIP_POLARIS10) ||
39032bec2afSLeo Liu (adev->asic_type == CHIP_POLARIS11) ||
391a7712897SLeo Liu (adev->asic_type == CHIP_POLARIS12) ||
392a7712897SLeo Liu (adev->asic_type == CHIP_VEGAM))
39332bec2afSLeo Liu return AMDGPU_VCE_HARVEST_VCE1;
39432bec2afSLeo Liu
3951dab5f06STom St Denis return 0;
3966a585777SAlex Deucher }
3976a585777SAlex Deucher }
3986a585777SAlex Deucher
vce_v3_0_early_init(void * handle)3995fc3aeebSyanyang1 static int vce_v3_0_early_init(void *handle)
400aaa36a97SAlex Deucher {
4015fc3aeebSyanyang1 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4025fc3aeebSyanyang1
4036a585777SAlex Deucher adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
4046a585777SAlex Deucher
4056a585777SAlex Deucher if ((adev->vce.harvest_config &
4066a585777SAlex Deucher (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
4076a585777SAlex Deucher (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
4086a585777SAlex Deucher return -ENOENT;
4096a585777SAlex Deucher
4106f0359ffSAlex Deucher adev->vce.num_rings = 3;
41175c65480SAlex Deucher
412aaa36a97SAlex Deucher vce_v3_0_set_ring_funcs(adev);
413aaa36a97SAlex Deucher vce_v3_0_set_irq_funcs(adev);
414aaa36a97SAlex Deucher
415aaa36a97SAlex Deucher return 0;
416aaa36a97SAlex Deucher }
417aaa36a97SAlex Deucher
/**
 * vce_v3_0_sw_init - software-side setup of the VCE block
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Registers the VCE trap interrupt, allocates the firmware/stack/data
 * BO (two stack+data slices, one per possible instance), loads the
 * firmware and initializes each software ring.
 *
 * Returns 0 on success or the first error encountered.
 */
static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	/* VCE trap interrupt */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
	if (r)
		return r;

	/* one firmware image plus a stack/data slice per instance */
	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	/* 52.8.3 required for 3 ring support */
	if (adev->vce.fw_version < FW_52_8_3)
		adev->vce.num_rings = 2;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);

		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	return r;
}
455aaa36a97SAlex Deucher
/* Software-side teardown: suspend the VCE engine, then release the
 * software state.  Returns the first error encountered.
 */
static int vce_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = amdgpu_vce_suspend(adev);

	return r ? r : amdgpu_vce_sw_fini(adev);
}
467aaa36a97SAlex Deucher
/**
 * vce_v3_0_hw_init - hardware bring-up of the VCE block
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Forces clock gating off, requests VCE clocks, then ring-tests every
 * enabled ring.  Returns 0 on success or the first ring-test error.
 */
static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vce_v3_0_override_vce_clock_gating(adev, true);

	/* bring the engine clocks up (10000 = 100 MHz units per the
	 * set_vce_clocks convention used elsewhere in amdgpu)
	 */
	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
		if (r)
			return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}
487aaa36a97SAlex Deucher
/**
 * vce_v3_0_hw_fini - hardware teardown of the VCE block
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Cancels the delayed idle work, waits for the engine to go idle,
 * stops it and gates the clocks.  Returns 0 on success or the error
 * from the idle wait (in which case the engine is left running).
 */
static int vce_v3_0_hw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* make sure the idle handler is not racing with the teardown */
	cancel_delayed_work_sync(&adev->vce.idle_work);

	r = vce_v3_0_wait_for_idle(handle);
	if (r)
		return r;

	vce_v3_0_stop(adev);
	return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
}
502d82e2c24SAndrey Grodzovsky
/**
 * vce_v3_0_suspend - suspend the VCE block
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Gates power/clocks (via dpm when enabled, otherwise manually), halts
 * the hardware and saves the VCE software state.
 */
static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 * - cancel the delayed idle work
	 * - enable powergating
	 * - enable clockgating
	 * - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_vce(adev, false);
	} else {
		amdgpu_asic_set_vce_clocks(adev, 0, 0);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_CG_STATE_GATE);
	}

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}
537aaa36a97SAlex Deucher
/* Resume the VCE block: restore the software state, then redo the
 * hardware bring-up.  Returns the first error encountered.
 */
static int vce_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = amdgpu_vce_resume(adev);

	return r ? r : vce_v3_0_hw_init(adev);
}
549aaa36a97SAlex Deucher
/**
 * vce_v3_0_mc_resume - program the memory-controller view for one instance
 *
 * @adev: amdgpu_device pointer
 * @idx: VCE instance being programmed (0 or 1)
 *
 * Points the VCPU caches at the firmware image and gives each instance
 * its own stack/data slice behind the shared firmware region.
 * NOTE(review): the raw masks in the clock-gating/LMI setup below are
 * opaque magic values; confirm against the VCE 3.0 register spec.
 */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	/* clock gating / LMI setup before touching the caches */
	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);

	/* Stoney and newer expose one 40-bit BAR per cache region */
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	/* cache region 0: the shared firmware image */
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	/* cache regions 1/2: per-instance stack and data slices */
	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		/* instance 1 uses the second stack/data slice in the BO */
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
600aaa36a97SAlex Deucher
vce_v3_0_is_idle(void * handle)6015fc3aeebSyanyang1 static bool vce_v3_0_is_idle(void *handle)
602aaa36a97SAlex Deucher {
6035fc3aeebSyanyang1 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
604be4f38e2SAlex Deucher u32 mask = 0;
6055fc3aeebSyanyang1
60674af1276STom St Denis mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
60774af1276STom St Denis mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
608be4f38e2SAlex Deucher
609be4f38e2SAlex Deucher return !(RREG32(mmSRBM_STATUS2) & mask);
610aaa36a97SAlex Deucher }
611aaa36a97SAlex Deucher
vce_v3_0_wait_for_idle(void * handle)6125fc3aeebSyanyang1 static int vce_v3_0_wait_for_idle(void *handle)
613aaa36a97SAlex Deucher {
614aaa36a97SAlex Deucher unsigned i;
6155fc3aeebSyanyang1 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
616be4f38e2SAlex Deucher
61792988e60STom St Denis for (i = 0; i < adev->usec_timeout; i++)
61892988e60STom St Denis if (vce_v3_0_is_idle(handle))
619aaa36a97SAlex Deucher return 0;
62092988e60STom St Denis
621aaa36a97SAlex Deucher return -ETIMEDOUT;
622aaa36a97SAlex Deucher }
623aaa36a97SAlex Deucher
624ac8e3f30SRex Zhu #define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */
625ac8e3f30SRex Zhu #define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */
626ac8e3f30SRex Zhu #define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */
627ac8e3f30SRex Zhu #define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
628ac8e3f30SRex Zhu VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
629115933a5SChunming Zhou
/**
 * vce_v3_0_check_soft_reset - determine whether VCE needs a soft reset
 *
 * Probes VCE_STATUS of both instances (selected via GRBM_GFX_INDEX) and
 * caches the required SRBM soft-reset bits in adev->vce.srbm_soft_reset.
 * Returns true when a reset is needed, false otherwise.
 */
static bool vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to VCE team , we should use VCE_STATUS instead
	 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for 1st instance, 10 for 2nd instance).
	 *
	 *VCE_STATUS
	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 *|----+----+-----------+----+----+----+----------+---------+----|
	 *|bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * VCE team suggest use bit 3--bit 6 for busy status check
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		/* NOTE(review): both instances' reset bits are requested even
		 * when only one reports busy — presumably deliberate so the
		 * pair is reset together; confirm against HW documentation. */
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	/* restore instance 0 selection before releasing the index mutex */
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}
670115933a5SChunming Zhou
vce_v3_0_soft_reset(void * handle)6715fc3aeebSyanyang1 static int vce_v3_0_soft_reset(void *handle)
672aaa36a97SAlex Deucher {
6735fc3aeebSyanyang1 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
674115933a5SChunming Zhou u32 srbm_soft_reset;
6755fc3aeebSyanyang1
676da146d3bSAlex Deucher if (!adev->vce.srbm_soft_reset)
677115933a5SChunming Zhou return 0;
678115933a5SChunming Zhou srbm_soft_reset = adev->vce.srbm_soft_reset;
679be4f38e2SAlex Deucher
680115933a5SChunming Zhou if (srbm_soft_reset) {
681115933a5SChunming Zhou u32 tmp;
682115933a5SChunming Zhou
683115933a5SChunming Zhou tmp = RREG32(mmSRBM_SOFT_RESET);
684115933a5SChunming Zhou tmp |= srbm_soft_reset;
685115933a5SChunming Zhou dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
686115933a5SChunming Zhou WREG32(mmSRBM_SOFT_RESET, tmp);
687115933a5SChunming Zhou tmp = RREG32(mmSRBM_SOFT_RESET);
688115933a5SChunming Zhou
689115933a5SChunming Zhou udelay(50);
690115933a5SChunming Zhou
691115933a5SChunming Zhou tmp &= ~srbm_soft_reset;
692115933a5SChunming Zhou WREG32(mmSRBM_SOFT_RESET, tmp);
693115933a5SChunming Zhou tmp = RREG32(mmSRBM_SOFT_RESET);
694115933a5SChunming Zhou
695115933a5SChunming Zhou /* Wait a little for things to settle down */
696115933a5SChunming Zhou udelay(50);
697115933a5SChunming Zhou }
698115933a5SChunming Zhou
699115933a5SChunming Zhou return 0;
700115933a5SChunming Zhou }
701115933a5SChunming Zhou
vce_v3_0_pre_soft_reset(void * handle)702115933a5SChunming Zhou static int vce_v3_0_pre_soft_reset(void *handle)
703115933a5SChunming Zhou {
704115933a5SChunming Zhou struct amdgpu_device *adev = (struct amdgpu_device *)handle;
705115933a5SChunming Zhou
706da146d3bSAlex Deucher if (!adev->vce.srbm_soft_reset)
707115933a5SChunming Zhou return 0;
708115933a5SChunming Zhou
709aaa36a97SAlex Deucher mdelay(5);
710aaa36a97SAlex Deucher
711115933a5SChunming Zhou return vce_v3_0_suspend(adev);
712115933a5SChunming Zhou }
713115933a5SChunming Zhou
714115933a5SChunming Zhou
vce_v3_0_post_soft_reset(void * handle)715115933a5SChunming Zhou static int vce_v3_0_post_soft_reset(void *handle)
716115933a5SChunming Zhou {
717115933a5SChunming Zhou struct amdgpu_device *adev = (struct amdgpu_device *)handle;
718115933a5SChunming Zhou
719da146d3bSAlex Deucher if (!adev->vce.srbm_soft_reset)
720115933a5SChunming Zhou return 0;
721115933a5SChunming Zhou
722115933a5SChunming Zhou mdelay(5);
723115933a5SChunming Zhou
724115933a5SChunming Zhou return vce_v3_0_resume(adev);
725aaa36a97SAlex Deucher }
726aaa36a97SAlex Deucher
vce_v3_0_set_interrupt_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned type,enum amdgpu_interrupt_state state)727aaa36a97SAlex Deucher static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
728aaa36a97SAlex Deucher struct amdgpu_irq_src *source,
729aaa36a97SAlex Deucher unsigned type,
730aaa36a97SAlex Deucher enum amdgpu_interrupt_state state)
731aaa36a97SAlex Deucher {
732aaa36a97SAlex Deucher uint32_t val = 0;
733aaa36a97SAlex Deucher
734aaa36a97SAlex Deucher if (state == AMDGPU_IRQ_STATE_ENABLE)
735aaa36a97SAlex Deucher val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
736aaa36a97SAlex Deucher
737aaa36a97SAlex Deucher WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
738aaa36a97SAlex Deucher return 0;
739aaa36a97SAlex Deucher }
740aaa36a97SAlex Deucher
vce_v3_0_process_interrupt(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)741aaa36a97SAlex Deucher static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
742aaa36a97SAlex Deucher struct amdgpu_irq_src *source,
743aaa36a97SAlex Deucher struct amdgpu_iv_entry *entry)
744aaa36a97SAlex Deucher {
745aaa36a97SAlex Deucher DRM_DEBUG("IH: VCE\n");
746d6c29c30SLeo Liu
747f3f0ea95STom St Denis WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
748d6c29c30SLeo Liu
7497ccf5aa8SAlex Deucher switch (entry->src_data[0]) {
750aaa36a97SAlex Deucher case 0:
751aaa36a97SAlex Deucher case 1:
7526f0359ffSAlex Deucher case 2:
7537ccf5aa8SAlex Deucher amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
754aaa36a97SAlex Deucher break;
755aaa36a97SAlex Deucher default:
756aaa36a97SAlex Deucher DRM_ERROR("Unhandled interrupt: %d %d\n",
7577ccf5aa8SAlex Deucher entry->src_id, entry->src_data[0]);
758aaa36a97SAlex Deucher break;
759aaa36a97SAlex Deucher }
760aaa36a97SAlex Deucher
761aaa36a97SAlex Deucher return 0;
762aaa36a97SAlex Deucher }
763aaa36a97SAlex Deucher
/**
 * vce_v3_0_set_clockgating_state - enable/disable VCE clock gating
 *
 * Programs each non-harvested VCE instance (selected through
 * GRBM_GFX_INDEX while holding grbm_idx_mutex), then hands off to
 * vce_v3_0_set_vce_sw_clock_gating() for the per-instance SW gating
 * registers. Only applies when the ASIC advertises VCE MGCG support.
 */
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	/* nothing to do when the ASIC has no VCE medium-grain CG */
	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));

		/* when gating is being turned off, reprogram the ON/OFF
		 * delay fields (low 12 bits) before disabling SW gating */
		if (!enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	/* restore broadcast/default instance selection */
	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
804aaa36a97SAlex Deucher
vce_v3_0_set_powergating_state(void * handle,enum amd_powergating_state state)8055fc3aeebSyanyang1 static int vce_v3_0_set_powergating_state(void *handle,
8065fc3aeebSyanyang1 enum amd_powergating_state state)
807aaa36a97SAlex Deucher {
808aaa36a97SAlex Deucher /* This doesn't actually powergate the VCE block.
809aaa36a97SAlex Deucher * That's done in the dpm code via the SMC. This
810aaa36a97SAlex Deucher * just re-inits the block as necessary. The actual
811aaa36a97SAlex Deucher * gating still happens in the dpm code. We should
812aaa36a97SAlex Deucher * revisit this when there is a cleaner line between
813aaa36a97SAlex Deucher * the smc and the hw blocks
814aaa36a97SAlex Deucher */
8155fc3aeebSyanyang1 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
816c79b5561SHuang Rui int ret = 0;
8175fc3aeebSyanyang1
818c79b5561SHuang Rui if (state == AMD_PG_STATE_GATE) {
8196fc11b0eSRex Zhu ret = vce_v3_0_stop(adev);
8206fc11b0eSRex Zhu if (ret)
8216fc11b0eSRex Zhu goto out;
822c79b5561SHuang Rui } else {
823c79b5561SHuang Rui ret = vce_v3_0_start(adev);
824c79b5561SHuang Rui if (ret)
825c79b5561SHuang Rui goto out;
826c79b5561SHuang Rui }
827c79b5561SHuang Rui
828c79b5561SHuang Rui out:
829c79b5561SHuang Rui return ret;
830c79b5561SHuang Rui }
831c79b5561SHuang Rui
/**
 * vce_v3_0_get_clockgating_state - report active VCE clock-gating flags
 *
 * ORs AMD_CG_SUPPORT_VCE_MGCG into @flags when the gating delay field
 * is programmed in VCE_CLOCK_GATING_A. Skips the register read entirely
 * while VCE is power gated, since its registers are inaccessible then.
 */
static void vce_v3_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	/* APUs expose the power-gate status via a different SMC register */
	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
		goto out;
	}

	/* read from instance 0 */
	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);

	/* AMD_CG_SUPPORT_VCE_MGCG */
	data = RREG32(mmVCE_CLOCK_GATING_A);
	if (data & (0x04 << 4))
		*flags |= AMD_CG_SUPPORT_VCE_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}
859aaa36a97SAlex Deucher
/**
 * vce_v3_0_ring_emit_ib - emit an indirect-buffer packet (VM mode)
 *
 * Packet layout is fixed by hardware: command, VM ID, IB address
 * (low then high dword), length in dwords — do not reorder the writes.
 */
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
873ea4a8c1dSMaruthi Srinivas Bayyavarapu
/**
 * vce_v3_0_emit_vm_flush - emit page-table update and TLB flush packets
 *
 * Updates the page-table base for @vmid (page-aligned address, hence
 * the >> 12), then flushes that VM ID's TLB. Packet order is fixed.
 */
static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
				   unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
885ea4a8c1dSMaruthi Srinivas Bayyavarapu
/**
 * vce_v3_0_emit_pipeline_sync - wait for previously submitted work
 *
 * Emits a WAIT_GE packet on the ring's own fence address/sequence so
 * the engine stalls until prior submissions have signaled.
 */
static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}
896ea4a8c1dSMaruthi Srinivas Bayyavarapu
/* IP-block lifecycle callbacks shared by the VCE 3.0/3.1/3.4 blocks
 * registered at the bottom of this file. */
static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.check_soft_reset = vce_v3_0_check_soft_reset,
	.pre_soft_reset = vce_v3_0_pre_soft_reset,
	.soft_reset = vce_v3_0_soft_reset,
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
	.get_clockgating_state = vce_v3_0_get_clockgating_state,
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};
919aaa36a97SAlex Deucher
/* Ring callbacks for physical-address mode (used by ASICs older than
 * Stoney — see vce_v3_0_set_ring_funcs); command streams are parsed
 * on the CPU via .parse_cs. */
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_frame_size =
		4 + /* vce_v3_0_emit_pipeline_sync */
		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};
943aaa36a97SAlex Deucher
/* Ring callbacks for VM mode (Stoney and newer); the command stream is
 * patched in place and VM flush / pipeline sync are emitted by the
 * v3_0-specific helpers above. */
static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.patch_cs_in_place = amdgpu_vce_ring_parse_cs_vm,
	.emit_frame_size =
		6 + /* vce_v3_0_emit_vm_flush */
		4 + /* vce_v3_0_emit_pipeline_sync */
		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
	.emit_ib = vce_v3_0_ring_emit_ib,
	.emit_vm_flush = vce_v3_0_emit_vm_flush,
	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};
970ea4a8c1dSMaruthi Srinivas Bayyavarapu
vce_v3_0_set_ring_funcs(struct amdgpu_device * adev)971aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
972aaa36a97SAlex Deucher {
97375c65480SAlex Deucher int i;
97475c65480SAlex Deucher
975ea4a8c1dSMaruthi Srinivas Bayyavarapu if (adev->asic_type >= CHIP_STONEY) {
9765d4af988SAlex Deucher for (i = 0; i < adev->vce.num_rings; i++) {
977ea4a8c1dSMaruthi Srinivas Bayyavarapu adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
9785d4af988SAlex Deucher adev->vce.ring[i].me = i;
9795d4af988SAlex Deucher }
980ea4a8c1dSMaruthi Srinivas Bayyavarapu DRM_INFO("VCE enabled in VM mode\n");
981ea4a8c1dSMaruthi Srinivas Bayyavarapu } else {
9825d4af988SAlex Deucher for (i = 0; i < adev->vce.num_rings; i++) {
983ea4a8c1dSMaruthi Srinivas Bayyavarapu adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
9845d4af988SAlex Deucher adev->vce.ring[i].me = i;
9855d4af988SAlex Deucher }
986ea4a8c1dSMaruthi Srinivas Bayyavarapu DRM_INFO("VCE enabled in physical mode\n");
987ea4a8c1dSMaruthi Srinivas Bayyavarapu }
988aaa36a97SAlex Deucher }
989aaa36a97SAlex Deucher
/* Interrupt-source callbacks for the single VCE trap interrupt. */
static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};
994aaa36a97SAlex Deucher
vce_v3_0_set_irq_funcs(struct amdgpu_device * adev)995aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
996aaa36a97SAlex Deucher {
997aaa36a97SAlex Deucher adev->vce.irq.num_types = 1;
998aaa36a97SAlex Deucher adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
999aaa36a97SAlex Deucher };
1000a1255107SAlex Deucher
/* Exported IP-block descriptors; all three VCE 3.x revisions share the
 * same callback table (vce_v3_0_ip_funcs) and differ only in minor
 * version number. */
const struct amdgpu_ip_block_version vce_v3_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version vce_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version vce_v3_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 4,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};
1024