/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v3_0.h"
#include "vcn_sw_ring.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#include <drm/drm_drv.h>

#define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0				0x48200
#define VCN1_AON_SOC_ADDRESS_3_0				0x48000

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN_INSTANCES_SIENNA_CICHLID				2
#define DEC_SW_RING_ENABLED					FALSE

#define RDECODE_MSG_CREATE					0x00000000
#define RDECODE_MESSAGE_CREATE					0x00000001

static const struct amdgpu_hwip_reg_entry vcn_reg_list_3_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
};
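
/*
 * Registers captured for the VCN IP dump; vcn_v3_0_sw_init() sizes
 * adev->vcn.ip_dump as num_vcn_inst * ARRAY_SIZE(vcn_reg_list_3_0) dwords,
 * one copy of this list per instance.
 */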

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state);
static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst);

static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v3_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
		adev->vcn.harvest_config = 0;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			adev->vcn.inst[i].num_enc_rings = 1;

	} else {
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
			    IP_VERSION(3, 0, 33))
				adev->vcn.inst[i].num_enc_rings = 0;
			else
				adev->vcn.inst[i].num_enc_rings = 2;
		}
	}

	vcn_v3_0_set_dec_ring_funcs(adev);
	vcn_v3_0_set_enc_ring_funcs(adev);
	vcn_v3_0_set_irq_funcs(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		adev->vcn.inst[i].set_pg_state = vcn_v3_0_set_pg_state;

		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}
	return 0;
}

/**
 * vcn_v3_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	int vcn_doorbell_index = 0;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
	uint32_t *ptr;
	struct amdgpu_device *adev = ip_block->adev;

	/*
	 * Note: doorbell assignment is fixed for SRIOV multiple VCN engines
	 * Formula:
	 *   vcn_db_base  = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
	 *   dec_ring_i   = vcn_db_base + i * (adev->vcn.inst[i].num_enc_rings + 1)
	 *   enc_ring_i,j = vcn_db_base + i * (adev->vcn.inst[i].num_enc_rings + 1) + 1 + j
	 */
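	/*
	 * Worked example of the formula above (an illustration, assuming
	 * SRIOV with num_enc_rings = 1 as set in vcn_v3_0_early_init):
	 * instance 0 uses vcn_db_base + 0 (dec) and vcn_db_base + 1 (enc);
	 * instance 1 uses vcn_db_base + 2 (dec) and vcn_db_base + 3 (enc).
	 * On bare metal the rings are instead spaced 8 doorbells per
	 * instance, as the assignments further down show.
	 */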
	if (amdgpu_sriov_vf(adev)) {
		vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1;
		/* get DWORD offset */
		vcn_doorbell_index = vcn_doorbell_index << 1;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_vcn_sw_init(adev, i);
		if (r)
			return r;

		amdgpu_vcn_setup_ucode(adev, i);

		r = amdgpu_vcn_resume(adev, i);
		if (r)
			return r;

		adev->vcn.inst[i].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.inst[i].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
		adev->vcn.inst[i].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.inst[i].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.inst[i].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.inst[i].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->use_doorbell = true;
		if (amdgpu_sriov_vf(adev)) {
			ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1);
		} else {
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
		}
		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j);

			/* VCN ENC TRAP */
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
			if (r)
				return r;

			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->use_doorbell = true;
			if (amdgpu_sriov_vf(adev)) {
				ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1) + 1 + j;
			} else {
				ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
			}
			ring->vm_hub = AMDGPU_MMHUB0(0);
			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					     hw_prio, &adev->vcn.inst[i].sched_score);
			if (r)
				return r;
		}

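		/*
		 * fw_shared is the CPU view of the firmware shared-memory
		 * buffer, which the VCPU reaches through the non-cache
		 * window programmed in vcn_v3_0_mc_resume(); the flags
		 * below advertise the driver-side features in effect.
		 */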
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
		fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
		fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG;
		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 1, 2))
			fw_shared->smu_interface_info.smu_interface_type = 2;
		else if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
			 IP_VERSION(3, 1, 1))
			fw_shared->smu_interface_info.smu_interface_type = 1;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
		adev->vcn.inst[i].reset = vcn_v3_0_reset;
	}

	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
	if (!amdgpu_sriov_vf(adev))
		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (ptr == NULL) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v3_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

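	/*
	 * drm_dev_enter()/drm_dev_exit() guard the fw_shared writes below
	 * against a concurrent device unplug; if the device is already
	 * gone, the shared buffer must not be touched.
	 */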
	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sw_ring.is_enabled = false;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;

		r = amdgpu_vcn_sw_fini(adev, i);
		if (r)
			return r;
	}

	kfree(adev->vcn.ip_dump);
	return 0;
}

/**
 * vcn_v3_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v3_0_start_sriov(adev);
		if (r)
			return r;

		/* initialize VCN dec and enc ring buffers */
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;
			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) {
				ring->sched.ready = false;
				ring->no_scheduler = true;
				dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
			} else {
				ring->wptr = 0;
				ring->wptr_old = 0;
				vcn_v3_0_dec_ring_set_wptr(ring);
				ring->sched.ready = true;
			}

			for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
					ring->sched.ready = false;
					ring->no_scheduler = true;
					dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
				} else {
					ring->wptr = 0;
					ring->wptr_old = 0;
					vcn_v3_0_enc_ring_set_wptr(ring);
					ring->sched.ready = true;
				}
			}
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;

			for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					return r;
			}
		}
	}

	return 0;
}

/**
 * vcn_v3_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block and mark the rings as no longer ready
 */
static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		cancel_delayed_work_sync(&vinst->idle_work);

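		/*
		 * Force the instance into the gated PG state if DPG is
		 * supported, or if it was left ungated with the VCPU still
		 * reporting a status; this is skipped under SRIOV, where
		 * the driver does not program the power state directly.
		 */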
		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (vinst->cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
				vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v3_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v3_0_hw_fini(ip_block);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(ip_block->adev, i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v3_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
	}

	r = vcn_v3_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v3_0_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v3_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst].fw->size + 4);
	uint32_t offset;

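	/*
	 * Layout programmed below: cache window 0 holds the firmware image
	 * (from the PSP TMR region when PSP-loaded, otherwise from the
	 * driver BO), window 1 the stack, window 2 the context, and the
	 * non-cache window maps the fw_shared buffer.
	 */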
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}

static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

static void vcn_v3_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);
}

static void vcn_v3_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIRL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDATD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}
}

/**
 * vcn_v3_0_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Disable clock gating for VCN block
 */
static void vcn_v3_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, mmUVD_CGC_GATE, data);

	SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__IME_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK
		| UVD_SUVD_CGC_GATE__EFC_MASK
		| UVD_SUVD_CGC_GATE__SAOE_MASK
		| UVD_SUVD_CGC_GATE__SRE_AV1_MASK
		| UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
		| UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
		| UVD_SUVD_CGC_GATE__SCM_AV1_MASK
		| UVD_SUVD_CGC_GATE__SMPA_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
	data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
		| UVD_SUVD_CGC_GATE2__MPBE1_MASK
		| UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
		| UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
		| UVD_SUVD_CGC_GATE2__MPC1_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
					   uint8_t sram_sel,
					   uint8_t indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v3_0_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Enable clock gating for VCN block
 */
static void vcn_v3_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
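	/*
	 * With indirect set, the WREG32_SOC15_DPG_MODE() calls below do not
	 * touch the registers directly; the programming is staged in the
	 * DPG SRAM image (starting at dpg_sram_curr_addr) and applied later
	 * through amdgpu_vcn_psp_update_sram().
	 */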

	/* enable clock gating */
	vcn_v3_0_clock_gating_dpg_mode(vinst, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXB0),
		 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v3_0_mc_resume_dpg_mode(vinst, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	/* add nop to work around the PSP size check */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for the ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	/* Reset FW shared memory RBC WPTR/RPTR */
	fw_shared->rb.rptr = 0;
	fw_shared->rb.wptr = lower_32_bits(ring->wptr);

	/* resetting done, fw can check RB ring */
	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);

	return 0;
}

static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int j, k, r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, true, i);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v3_0_start_dpg_mode(vinst, vinst->indirect_sram);

	/* disable VCN power gating */
	vcn_v3_0_disable_static_power_gating(vinst);

	/* set VCN status busy */
	tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v3_0_disable_clock_gating(vinst);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
	WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v3_0_mc_resume(vinst);

	/* VCN global tiling registers */
	WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* unblock VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

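	/*
	 * Poll for the VCPU boot report in UVD_STATUS (checked as bit 1
	 * here), waiting up to 100 * 10 ms per attempt; on timeout, pulse
	 * BLK_RST to reset the VCPU and retry, giving up after 10 attempts.
	 */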
	for (j = 0; j < 10; ++j) {
		uint32_t status;

		for (k = 0; k < 100; ++k) {
			status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__BLK_RST_MASK,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);

		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

	ring = &adev->vcn.inst[i].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
	fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

	/* program the RB_BASE for the ring buffer */
	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
	ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));
	fw_shared->rb.wptr = lower_32_bits(ring->wptr);
	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
	    IP_VERSION(3, 0, 33)) {
		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

		fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
	}

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, i, mmUVD_STATUS);

	return 0;
}

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
{
	int i, j;
	struct amdgpu_ring *ring;
	uint64_t cache_addr;
	uint64_t rb_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;

	struct mmsch_v3_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v3_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v3_0_cmd_end end = { {0} };
	struct mmsch_v3_0_init_header header;

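	/*
	 * Under SRIOV the driver cannot program the VCN registers directly;
	 * it instead fills the mm_table with a command stream (direct-write,
	 * direct-read-modify-write and end commands) that the MMSCH
	 * firmware executes on the driver's behalf.
	 */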
1412 	direct_wt.cmd_header.command_type =
1413 		MMSCH_COMMAND__DIRECT_REG_WRITE;
1414 	direct_rd_mod_wt.cmd_header.command_type =
1415 		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
1416 	end.cmd_header.command_type =
1417 		MMSCH_COMMAND__END;
1418 
1419 	header.version = MMSCH_VERSION;
1420 	header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2;
1421 	for (i = 0; i < MMSCH_V3_0_VCN_INSTANCES; i++) {
1422 		header.inst[i].init_status = 0;
1423 		header.inst[i].table_offset = 0;
1424 		header.inst[i].table_size = 0;
1425 	}
1426 
	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		table_size = 0;

		MMSCH_V3_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				0);
		} else {
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE0),
			cache_size);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->wptr = 0;
			rb_addr = ring->gpu_addr;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_LO),
				lower_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_HI),
				upper_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_SIZE),
				ring->ring_size / 4);
		}

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		rb_addr = ring->gpu_addr;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(rb_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(rb_addr));
		/* force RBC into idle state */
		tmp = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_RBC_RB_CNTL),
			tmp);

		/* add end packet */
		MMSCH_V3_0_INSERT_END();

		/* refine header */
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = header.total_size;
		header.inst[i].table_size = table_size;
		header.total_size += table_size;
	}

	/* Update init table header in memory */
	size = sizeof(struct mmsch_v3_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy((void *)table_loc, &header, size);

	/* Message MMSCH (in VCN[0]) to initialize this client:
	 * 1, write the GPU MC address of the memory descriptor location
	 * to the mmMMSCH_VF_CTX_ADDR_LO/HI registers
	 */
	ctx_addr = table->gpu_addr;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

	/* 2, update vmid of descriptor */
	tmp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, tmp);

	/* 3, notify mmsch about the size of this descriptor */
	size = header.total_size;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP reads back param + 1 (the MMSCH ack)
	 */
	param = 0x10000001;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, param);
	tmp = 0;
	timeout = 1000;
	resp = 0;
	expected = param + 1;
	while (resp != expected) {
		resp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		if (resp == expected)
			break;

		udelay(10);
		tmp = tmp + 10;
		if (tmp >= timeout) {
			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec waiting for mmMMSCH_VF_MAILBOX_RESP (expected=0x%08x, readback=0x%08x)\n",
				  tmp, expected, resp);
			return -EBUSY;
		}
	}

	return 0;
}

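/**
 * vcn_v3_0_stop_dpg_mode - stop VCN with dynamic power gating enabled
 *
 * @vinst: VCN instance pointer
 *
 * Unpause DPG, wait for the power status and the ring read/write
 * pointers to settle, then disable the dynamic power gating mode.
 */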
static int vcn_v3_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

	vcn_v3_0_pause_dpg_mode(vinst, &state);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* Keep one register read-back to ensure all register writes are
	 * posted; otherwise races may be introduced.
	 */
	RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);

	return 0;
}

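/**
 * vcn_v3_0_stop - stop a VCN instance
 *
 * @vinst: VCN instance pointer
 *
 * Wait for the engine and its memory interface to drain, block VCPU
 * register access, reset and clock-gate the VCPU, then re-enable
 * clock and power gating for the instance.
 */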
static int vcn_v3_0_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	uint32_t tmp;
	int r = 0;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v3_0_stop_dpg_mode(vinst);
		goto done;
	}

	/* wait for vcn idle */
	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		goto done;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* disable LMI UMC channel */
	tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* block VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__BLK_RST_MASK,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* apply soft reset */
	tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
	tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

	/* clear status */
	WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

	/* apply HW clock gating */
	vcn_v3_0_enable_clock_gating(vinst);

	/* enable VCN power gating */
	vcn_v3_0_enable_static_power_gating(vinst);

	/* Keep one register read-back to ensure all register writes are
	 * posted; otherwise races may be introduced.
	 */
	RREG32_SOC15(VCN, i, mmUVD_STATUS);

done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, i);

	return r;
}

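/**
 * vcn_v3_0_pause_dpg_mode - pause or unpause VCN DPG mode
 *
 * @vinst: VCN instance pointer
 * @new_state: requested pause state
 *
 * On pause, request a DPG (NJ) pause, wait for the firmware ACK and
 * reprogram the encode rings while DPG power-up is stalled; on
 * unpause, just clear the pause request.
 */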
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
				    IP_VERSION(3, 0, 33)) {
					/* Restore */
					fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
					fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
					ring = &adev->vcn.inst[inst_idx].ring_enc[0];
					ring->wptr = 0;
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
					fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

					fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
					ring = &adev->vcn.inst[inst_idx].ring_enc[1];
					ring->wptr = 0;
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
					fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

					/* restore wptr/rptr from the pointers saved in FW shared memory */
					WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
				}

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v3_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v3_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v3_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	volatile struct amdgpu_fw_shared *fw_shared;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		/* Whenever RBC_RB_WPTR is updated, save the wptr in fw_shared rb.wptr and in SCRATCH2 */
		fw_shared = adev->vcn.inst[ring->me].fw_shared.cpu_addr;
		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
		WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr));
	}

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0x3f,
	.nop = VCN_DEC_SW_CMD_NO_OP,
	.secure_submission_supported = true,
	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		VCN_SW_RING_EMIT_FRAME_SIZE,
	.emit_ib_size = 5, /* vcn_dec_sw_ring_emit_ib */
	.emit_ib = vcn_dec_sw_ring_emit_ib,
	.emit_fence = vcn_dec_sw_ring_emit_fence,
	.emit_vm_flush = vcn_dec_sw_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_sw_ring_test_ring,
	.test_ib = NULL, /* amdgpu_vcn_dec_sw_ring_test_ib */
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_dec_sw_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_dec_sw_ring_emit_wreg,
	.emit_reg_wait = vcn_dec_sw_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

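/**
 * vcn_v3_0_limit_sched - limit a job to the first VCN instance
 *
 * @p: command submission parser context
 * @job: the job to restrict
 *
 * Codecs such as AV1 that cannot run on every instance are only
 * supported on VCN instance 0, so wait for the entity's in-flight
 * jobs to finish and then move it over to the instance 0 scheduler.
 */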
static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
				struct amdgpu_job *job)
{
	struct drm_gpu_scheduler **scheds;
	struct dma_fence *fence;

	/* if VCN0 is harvested, we can't support AV1 */
	if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
		return -EINVAL;

	/* wait for all jobs to finish before switching to instance 0 */
	fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
	if (fence) {
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}

	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
		[AMDGPU_RING_PRIO_DEFAULT].sched;
	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
	return 0;
}

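/**
 * vcn_v3_0_dec_msg - validate a decode message and check codec limits
 *
 * @p: command submission parser context
 * @job: the job the message belongs to
 * @addr: GPU address of the message buffer
 *
 * Map the message buffer and, for create messages, check which codec
 * is requested; codecs that cannot run on every instance are pinned
 * to instance 0 via vcn_v3_0_limit_sched().
 */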
static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
			    uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *map;
	uint32_t *msg, num_buffers;
	struct amdgpu_bo *bo;
	uint64_t start, end;
	unsigned int i;
	void *ptr;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = map->start * AMDGPU_GPU_PAGE_SIZE;
	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
		return r;
	}

	msg = ptr + addr - start;

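	/* Message layout as parsed below: msg[1] = size in bytes,
	 * msg[2] = number of buffer descriptors, msg[3] = message type,
	 * with 4-dword descriptors (type, offset, size, ...) starting
	 * at msg[6].
	 */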
	/* Check length */
	if (msg[1] > end - addr) {
		r = -EINVAL;
		goto out;
	}

	if (msg[3] != RDECODE_MSG_CREATE)
		goto out;

	num_buffers = msg[2];
	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
		uint32_t offset, size, *create;

		if (msg[0] != RDECODE_MESSAGE_CREATE)
			continue;

		offset = msg[1];
		size = msg[2];

		if (offset + size > end) {
			r = -EINVAL;
			goto out;
		}

		create = ptr + addr + offset - start;

		/* H264, HEVC and VP9 can run on any instance */
		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
			continue;

		r = vcn_v3_0_limit_sched(p, job);
		if (r)
			goto out;
	}

out:
	amdgpu_bo_kunmap(bo);
	return r;
}

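/**
 * vcn_v3_0_ring_patch_cs_in_place - scan a decode IB for messages
 *
 * @p: command submission parser context
 * @job: the job being submitted
 * @ib: the indirect buffer to scan
 *
 * Walk the register-write packets in the IB, latch the low/high
 * halves of the message address, and validate the message with
 * vcn_v3_0_dec_msg() whenever command 0 is written to the GPCOM
 * command register. Instance 0 can decode anything and is skipped.
 */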
static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	struct amdgpu_ring *ring = amdgpu_job_ring(job);
	uint32_t msg_lo = 0, msg_hi = 0;
	unsigned int i;
	int r;

	/* The first instance can decode anything */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);
		uint32_t val = amdgpu_ib_get_value(ib, i + 1);

		if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data0, 0)) {
			msg_lo = val;
		} else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data1, 0)) {
			msg_hi = val;
		} else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.cmd, 0) &&
			   val == 0) {
			r = vcn_v3_0_dec_msg(p, job,
					     ((u64)msg_hi) << 32 | msg_lo);
			if (r)
				return r;
		}
	}
	return 0;
}

static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.secure_submission_supported = true,
	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
	.patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = amdgpu_vcn_ring_reset,
};

/**
 * vcn_v3_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v3_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v3_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v3_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v3_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v3_0_enc_ring_get_rptr,
	.get_wptr = vcn_v3_0_enc_ring_get_wptr,
	.set_wptr = vcn_v3_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = amdgpu_vcn_ring_reset,
};

static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (!DEC_SW_RING_ENABLED)
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs;
		else
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
	}
}

static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
			adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
			adev->vcn.inst[i].ring_enc[j].me = i;
		}
	}
}

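/* Reset one VCN instance: stop it, re-apply clock and power gating,
 * then bring it back up through the normal start path.
 */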
static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst)
{
	int r;

	r = vcn_v3_0_stop(vinst);
	if (r)
		return r;
	vcn_v3_0_enable_clock_gating(vinst);
	vcn_v3_0_enable_static_power_gating(vinst);
	return vcn_v3_0_start(vinst);
}

static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v3_0_enable_clock_gating(vinst);
		} else {
			vcn_v3_0_disable_clock_gating(vinst);
		}
	}

	return 0;
}

static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state)
{
	struct amdgpu_device *adev = vinst->adev;
	int ret = 0;

	/* For SR-IOV, the guest should not control VCN power-gating;
	 * the MMSCH FW controls both power-gating and clock-gating,
	 * so the guest should avoid touching CGC and PG.
	 */
	if (amdgpu_sriov_vf(adev)) {
		vinst->cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == vinst->cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v3_0_stop(vinst);
	else
		ret = vcn_v3_0_start(vinst);

	if (!ret)
		vinst->cur_state = state;

	return ret;
}

static int vcn_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v3_0_irq_funcs = {
	.set = vcn_v3_0_set_interrupt_state,
	.process = vcn_v3_0_process_interrupt,
};

static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs;
	}
}

static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
	uint32_t inst_off;
	bool is_powered;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * reg_count;
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
					   adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}

static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	uint32_t inst_off;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * reg_count;
		/* mmUVD_POWER_STATUS is always readable and is first element of the array */
		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered)
			for (j = 1; j < reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i));
	}
}

static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
	.name = "vcn_v3_0",
	.early_init = vcn_v3_0_early_init,
	.sw_init = vcn_v3_0_sw_init,
	.sw_fini = vcn_v3_0_sw_fini,
	.hw_init = vcn_v3_0_hw_init,
	.hw_fini = vcn_v3_0_hw_fini,
	.suspend = vcn_v3_0_suspend,
	.resume = vcn_v3_0_resume,
	.is_idle = vcn_v3_0_is_idle,
	.wait_for_idle = vcn_v3_0_wait_for_idle,
	.set_clockgating_state = vcn_v3_0_set_clockgating_state,
	.set_powergating_state = vcn_set_powergating_state,
	.dump_ip_state = vcn_v3_0_dump_ip_state,
	.print_ip_state = vcn_v3_0_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v3_0_ip_funcs,
};