xref: /linux/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c (revision 16280ded45fba1216d1d4c6acfc20c2d5b45ef50)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include <drm/drm_drv.h>
26 
27 #include "amdgpu.h"
28 #include "amdgpu_vcn.h"
29 #include "amdgpu_pm.h"
30 #include "soc15.h"
31 #include "soc15d.h"
32 #include "vcn_v2_0.h"
33 #include "mmsch_v1_0.h"
34 #include "vcn_v2_5.h"
35 
36 #include "vcn/vcn_2_5_offset.h"
37 #include "vcn/vcn_2_5_sh_mask.h"
38 #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
39 
40 #define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
41 #define VCN1_VID_SOC_ADDRESS_3_0				0x48200
42 
43 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
44 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
45 #define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
46 #define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
47 #define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
48 #define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
49 #define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d
50 
51 #define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
52 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
53 #define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
54 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c
55 
56 #define VCN25_MAX_HW_INSTANCES_ARCTURUS			2
57 
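/*
 * Registers snapshotted once per VCN instance into the IP dump buffer
 * allocated in vcn_v2_5_sw_init() (adev->vcn.ip_dump).
 */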
58 static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
59 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
61 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
62 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
63 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
64 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
65 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
66 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
67 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
68 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
69 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
70 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
71 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
72 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
73 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
74 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
75 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
76 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
77 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
78 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
79 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
80 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
81 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
82 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
83 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
84 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
85 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
86 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
87 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
88 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
89 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
90 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
91 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
92 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
93 };
94 
95 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
96 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
97 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
98 static int vcn_v2_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
99 				 enum amd_powergating_state state);
100 static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
101 				   struct dpg_pause_state *new_state);
102 static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
103 static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
104 
105 static int amdgpu_ih_clientid_vcns[] = {
106 	SOC15_IH_CLIENTID_VCN,
107 	SOC15_IH_CLIENTID_VCN1
108 };
109 
110 /**
111  * vcn_v2_5_early_init - set function pointers and load microcode
112  *
113  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
114  *
115  * Set ring and irq function pointers
116  * Load microcode from filesystem
117  */
118 static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
119 {
120 	struct amdgpu_device *adev = ip_block->adev;
121 	int i, r;
122 
123 	if (amdgpu_sriov_vf(adev)) {
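		/*
		 * VF path: the topology is hard-coded (two instances, one
		 * encode ring each, nothing harvested) instead of being read
		 * from the harvest fuses as on bare metal below.
		 */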
124 		adev->vcn.num_vcn_inst = 2;
125 		adev->vcn.harvest_config = 0;
126 		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
127 			adev->vcn.inst[i].num_enc_rings = 1;
128 	} else {
129 		u32 harvest;
130 		int i;
131 
132 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
133 			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
134 			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
135 				adev->vcn.harvest_config |= 1 << i;
136 			adev->vcn.inst[i].num_enc_rings = 2;
137 		}
138 		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
139 						 AMDGPU_VCN_HARVEST_VCN1))
140 			/* both instances are harvested, disable the block */
141 			return -ENOENT;
142 	}
143 
144 	vcn_v2_5_set_dec_ring_funcs(adev);
145 	vcn_v2_5_set_enc_ring_funcs(adev);
146 	vcn_v2_5_set_irq_funcs(adev);
147 	vcn_v2_5_set_ras_funcs(adev);
148 
149 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
150 		adev->vcn.inst[i].set_pg_state = vcn_v2_5_set_pg_state;
151 
152 		r = amdgpu_vcn_early_init(adev, i);
153 		if (r)
154 			return r;
155 	}
156 
157 	return 0;
158 }
159 
160 /**
161  * vcn_v2_5_sw_init - sw init for VCN block
162  *
163  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
164  *
165  * Load the firmware and do software initialization
166  */
167 static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
168 {
169 	struct amdgpu_ring *ring;
170 	int i, j, r;
171 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
172 	uint32_t *ptr;
173 	struct amdgpu_device *adev = ip_block->adev;
174 
175 	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
176 		volatile struct amdgpu_fw_shared *fw_shared;
177 
178 		if (adev->vcn.harvest_config & (1 << j))
179 			continue;
180 		/* VCN DEC TRAP */
181 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
182 				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
183 		if (r)
184 			return r;
185 
186 		/* VCN ENC TRAP */
187 		for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
188 			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
189 				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
190 			if (r)
191 				return r;
192 		}
193 
194 		/* VCN POISON TRAP */
195 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
196 			VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
197 		if (r)
198 			return r;
199 
200 		r = amdgpu_vcn_sw_init(adev, j);
201 		if (r)
202 			return r;
203 
204 		amdgpu_vcn_setup_ucode(adev, j);
205 
206 		r = amdgpu_vcn_resume(adev, j);
207 		if (r)
208 			return r;
209 
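		/*
		 * Record the firmware-internal register indices (used when
		 * registers are addressed by index, e.g. from decode ring
		 * packets) and, where needed, the matching MMIO offsets.
		 */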
210 		adev->vcn.inst[j].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
211 		adev->vcn.inst[j].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
212 		adev->vcn.inst[j].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
213 		adev->vcn.inst[j].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
214 		adev->vcn.inst[j].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
215 		adev->vcn.inst[j].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
216 
217 		adev->vcn.inst[j].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
218 		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
219 		adev->vcn.inst[j].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
220 		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
221 		adev->vcn.inst[j].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
222 		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
223 		adev->vcn.inst[j].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
224 		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
225 		adev->vcn.inst[j].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
226 		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
227 
228 		ring = &adev->vcn.inst[j].ring_dec;
229 		ring->use_doorbell = true;
230 
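		/*
		 * Doorbell layout, relative to the vcn_ring0_1 doorbell base:
		 * under SR-IOV each instance takes 2 slots (decode + one
		 * encode ring); on bare metal instances are 8 slots apart,
		 * with decode at slot 0 and the encode rings from slot 2.
		 */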
231 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
232 				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
233 
234 		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0))
235 			ring->vm_hub = AMDGPU_MMHUB1(0);
236 		else
237 			ring->vm_hub = AMDGPU_MMHUB0(0);
238 
239 		sprintf(ring->name, "vcn_dec_%d", j);
240 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
241 				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
242 		if (r)
243 			return r;
244 
245 		for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
246 			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
247 
248 			ring = &adev->vcn.inst[j].ring_enc[i];
249 			ring->use_doorbell = true;
250 
251 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
252 					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
253 
254 			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
255 			    IP_VERSION(2, 5, 0))
256 				ring->vm_hub = AMDGPU_MMHUB1(0);
257 			else
258 				ring->vm_hub = AMDGPU_MMHUB0(0);
259 
260 			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
261 			r = amdgpu_ring_init(adev, ring, 512,
262 					     &adev->vcn.inst[j].irq, 0,
263 					     hw_prio, NULL);
264 			if (r)
265 				return r;
266 		}
267 
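		/*
		 * Advertise multi-queue support to the VCN firmware through
		 * the shared memory area.
		 */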
268 		fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
269 		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
270 
271 		if (amdgpu_vcnfw_log)
272 			amdgpu_vcn_fwlog_init(&adev->vcn.inst[j]);
273 
274 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
275 			adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
276 	}
277 
278 	if (amdgpu_sriov_vf(adev)) {
279 		r = amdgpu_virt_alloc_mm_table(adev);
280 		if (r)
281 			return r;
282 	}
283 
284 	r = amdgpu_vcn_ras_sw_init(adev);
285 	if (r)
286 		return r;
287 
288 	/* Allocate memory for VCN IP Dump buffer */
289 	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
290 	if (!ptr) {
291 		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
292 		adev->vcn.ip_dump = NULL;
293 	} else {
294 		adev->vcn.ip_dump = ptr;
295 	}
296 
297 	return 0;
298 }
299 
300 /**
301  * vcn_v2_5_sw_fini - sw fini for VCN block
302  *
303  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
304  *
305  * Suspend the VCN block and free up the software allocations
306  */
307 static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
308 {
309 	int i, r, idx;
310 	struct amdgpu_device *adev = ip_block->adev;
311 	volatile struct amdgpu_fw_shared *fw_shared;
312 
313 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
314 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
315 			if (adev->vcn.harvest_config & (1 << i))
316 				continue;
317 			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
318 			fw_shared->present_flag_0 = 0;
319 		}
320 		drm_dev_exit(idx);
321 	}
322 
323 
324 	if (amdgpu_sriov_vf(adev))
325 		amdgpu_virt_free_mm_table(adev);
326 
327 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
328 		r = amdgpu_vcn_suspend(adev, i);
329 		if (r)
330 			return r;
331 		r = amdgpu_vcn_sw_fini(adev, i);
332 		if (r)
333 			return r;
334 	}
335 
336 	kfree(adev->vcn.ip_dump);
337 
338 	return 0;
339 }
340 
341 /**
342  * vcn_v2_5_hw_init - start and test VCN block
343  *
344  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
345  *
346  * Initialize the hardware, boot up the VCPU and do some testing
347  */
348 static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
349 {
350 	struct amdgpu_device *adev = ip_block->adev;
351 	struct amdgpu_ring *ring;
352 	int i, j, r = 0;
353 
354 	if (amdgpu_sriov_vf(adev))
355 		r = vcn_v2_5_sriov_start(adev);
356 
357 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
358 		if (adev->vcn.harvest_config & (1 << j))
359 			continue;
360 
361 		if (amdgpu_sriov_vf(adev)) {
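			/*
			 * The MMSCH init table built in vcn_v2_5_sriov_start()
			 * only brings up the decode ring and the first encode
			 * ring, so leave the remaining encode rings
			 * unschedulable.
			 */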
362 			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
363 			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
364 			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
365 			adev->vcn.inst[j].ring_dec.sched.ready = true;
366 		} else {
367 
368 			ring = &adev->vcn.inst[j].ring_dec;
369 
370 			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
371 						     ring->doorbell_index, j);
372 
373 			r = amdgpu_ring_test_helper(ring);
374 			if (r)
375 				return r;
376 
377 			for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
378 				ring = &adev->vcn.inst[j].ring_enc[i];
379 				r = amdgpu_ring_test_helper(ring);
380 				if (r)
381 					return r;
382 			}
383 		}
384 	}
385 
386 	return r;
387 }
388 
389 /**
390  * vcn_v2_5_hw_fini - stop the hardware block
391  *
392  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
393  *
394  * Stop the VCN block and mark the rings as no longer ready
395  */
396 static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
397 {
398 	struct amdgpu_device *adev = ip_block->adev;
399 	int i;
400 
401 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
402 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
403 
404 		if (adev->vcn.harvest_config & (1 << i))
405 			continue;
406 
407 		cancel_delayed_work_sync(&vinst->idle_work);
408 
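		/*
		 * Gate the instance if DPG is supported, or if it is not
		 * already gated and the VCPU still reports a status.
		 */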
409 		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
410 		    (vinst->cur_state != AMD_PG_STATE_GATE &&
411 		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
412 			vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
413 
414 		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
415 			amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0);
416 	}
417 
418 	return 0;
419 }
420 
421 /**
422  * vcn_v2_5_suspend - suspend VCN block
423  *
424  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
425  *
426  * HW fini and suspend VCN block
427  */
428 static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
429 {
430 	struct amdgpu_device *adev = ip_block->adev;
431 	int r, i;
432 
433 	r = vcn_v2_5_hw_fini(ip_block);
434 	if (r)
435 		return r;
436 
437 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
438 		r = amdgpu_vcn_suspend(ip_block->adev, i);
439 		if (r)
440 			return r;
441 	}
442 
443 	return 0;
444 }
445 
446 /**
447  * vcn_v2_5_resume - resume VCN block
448  *
449  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
450  *
451  * Resume the firmware and re-initialize the VCN hardware
452  */
453 static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
454 {
455 	struct amdgpu_device *adev = ip_block->adev;
456 	int r, i;
457 
458 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
459 		r = amdgpu_vcn_resume(ip_block->adev, i);
460 		if (r)
461 			return r;
462 	}
463 
464 	r = vcn_v2_5_hw_init(ip_block);
465 
466 	return r;
467 }
468 
469 /**
470  * vcn_v2_5_mc_resume - memory controller programming
471  *
472  * @vinst: VCN instance
473  *
474  * Let the VCN memory controller know its offsets
475  */
476 static void vcn_v2_5_mc_resume(struct amdgpu_vcn_inst *vinst)
477 {
478 	struct amdgpu_device *adev = vinst->adev;
479 	int i = vinst->inst;
480 	uint32_t size;
481 	uint32_t offset;
482 
483 	if (adev->vcn.harvest_config & (1 << i))
484 		return;
485 
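	/*
	 * The VCPU address space is programmed as three cached windows backed
	 * by the instance's VCPU buffer (firmware image, stack, context) plus
	 * one non-cached window mapping the shared firmware structure.  With
	 * PSP firmware loading, cache window 0 points at the TMR copy of the
	 * ucode instead.
	 */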
486 	size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
487 	/* cache window 0: fw */
488 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
489 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
490 			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
491 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
492 			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
493 		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
494 		offset = 0;
495 	} else {
496 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
497 			     lower_32_bits(adev->vcn.inst[i].gpu_addr));
498 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
499 			     upper_32_bits(adev->vcn.inst[i].gpu_addr));
500 		offset = size;
501 		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
502 			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
503 	}
504 	WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
505 
506 	/* cache window 1: stack */
507 	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
508 		     lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
509 	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
510 		     upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
511 	WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
512 	WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
513 
514 	/* cache window 2: context */
515 	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
516 		     lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
517 	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
518 		     upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
519 	WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
520 	WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
521 
522 	/* non-cache window */
523 	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
524 		     lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
525 	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
526 		     upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
527 	WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
528 	WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
529 		     AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
530 }
531 
532 static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
533 					bool indirect)
534 {
535 	struct amdgpu_device *adev = vinst->adev;
536 	int inst_idx = vinst->inst;
537 	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
538 	uint32_t offset;
539 
540 	/* cache window 0: fw */
541 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
542 		if (!indirect) {
543 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
544 				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
545 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
546 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
547 				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
548 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
549 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
550 				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
551 		} else {
552 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
553 				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
554 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
555 				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
556 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
557 				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
558 		}
559 		offset = 0;
560 	} else {
561 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
562 			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
563 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
564 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
565 			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
566 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
567 		offset = size;
568 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
569 			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
570 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
571 	}
572 
573 	if (!indirect)
574 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
575 			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
576 	else
577 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
578 			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
579 
580 	/* cache window 1: stack */
581 	if (!indirect) {
582 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
583 			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
584 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
585 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
586 			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
587 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
588 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
589 			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
590 	} else {
591 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
592 			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
593 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
594 			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
595 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
596 			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
597 	}
598 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
599 		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
600 
601 	/* cache window 2: context */
602 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
603 		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
604 		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
605 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
606 		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
607 		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
608 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
609 		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
610 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
611 		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
612 
613 	/* non-cache window */
614 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
615 		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
616 		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
617 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
618 		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
619 		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
620 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
621 		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
622 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
623 		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
624 		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
625 
626 	/* VCN global tiling registers */
627 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
628 		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
629 }
630 
631 /**
632  * vcn_v2_5_disable_clock_gating - disable VCN clock gating
633  *
634  * @vinst: VCN instance
635  *
636  * Disable clock gating for VCN block
637  */
638 static void vcn_v2_5_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
639 {
640 	struct amdgpu_device *adev = vinst->adev;
641 	int i = vinst->inst;
642 	uint32_t data;
643 
644 	if (adev->vcn.harvest_config & (1 << i))
645 		return;
646 	/* disable UVD CGC */
647 	data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
648 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
649 		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
650 	else
651 		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
652 	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
653 	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
654 	WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
655 
656 	data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
657 	data &= ~(UVD_CGC_GATE__SYS_MASK
658 		  | UVD_CGC_GATE__UDEC_MASK
659 		  | UVD_CGC_GATE__MPEG2_MASK
660 		  | UVD_CGC_GATE__REGS_MASK
661 		  | UVD_CGC_GATE__RBC_MASK
662 		  | UVD_CGC_GATE__LMI_MC_MASK
663 		  | UVD_CGC_GATE__LMI_UMC_MASK
664 		  | UVD_CGC_GATE__IDCT_MASK
665 		  | UVD_CGC_GATE__MPRD_MASK
666 		  | UVD_CGC_GATE__MPC_MASK
667 		  | UVD_CGC_GATE__LBSI_MASK
668 		  | UVD_CGC_GATE__LRBBM_MASK
669 		  | UVD_CGC_GATE__UDEC_RE_MASK
670 		  | UVD_CGC_GATE__UDEC_CM_MASK
671 		  | UVD_CGC_GATE__UDEC_IT_MASK
672 		  | UVD_CGC_GATE__UDEC_DB_MASK
673 		  | UVD_CGC_GATE__UDEC_MP_MASK
674 		  | UVD_CGC_GATE__WCB_MASK
675 		  | UVD_CGC_GATE__VCPU_MASK
676 		  | UVD_CGC_GATE__MMSCH_MASK);
677 
678 	WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
679 
680 	SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0,  0xFFFFFFFF);
681 
682 	data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
683 	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
684 		  | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
685 		  | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
686 		  | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
687 		  | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
688 		  | UVD_CGC_CTRL__SYS_MODE_MASK
689 		  | UVD_CGC_CTRL__UDEC_MODE_MASK
690 		  | UVD_CGC_CTRL__MPEG2_MODE_MASK
691 		  | UVD_CGC_CTRL__REGS_MODE_MASK
692 		  | UVD_CGC_CTRL__RBC_MODE_MASK
693 		  | UVD_CGC_CTRL__LMI_MC_MODE_MASK
694 		  | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
695 		  | UVD_CGC_CTRL__IDCT_MODE_MASK
696 		  | UVD_CGC_CTRL__MPRD_MODE_MASK
697 		  | UVD_CGC_CTRL__MPC_MODE_MASK
698 		  | UVD_CGC_CTRL__LBSI_MODE_MASK
699 		  | UVD_CGC_CTRL__LRBBM_MODE_MASK
700 		  | UVD_CGC_CTRL__WCB_MODE_MASK
701 		  | UVD_CGC_CTRL__VCPU_MODE_MASK
702 		  | UVD_CGC_CTRL__MMSCH_MODE_MASK);
703 	WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
704 
705 	/* turn on */
706 	data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
707 	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
708 		 | UVD_SUVD_CGC_GATE__SIT_MASK
709 		 | UVD_SUVD_CGC_GATE__SMP_MASK
710 		 | UVD_SUVD_CGC_GATE__SCM_MASK
711 		 | UVD_SUVD_CGC_GATE__SDB_MASK
712 		 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
713 		 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
714 		 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
715 		 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
716 		 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
717 		 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
718 		 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
719 		 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
720 		 | UVD_SUVD_CGC_GATE__SCLR_MASK
721 		 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
722 		 | UVD_SUVD_CGC_GATE__ENT_MASK
723 		 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
724 		 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
725 		 | UVD_SUVD_CGC_GATE__SITE_MASK
726 		 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
727 		 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
728 		 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
729 		 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
730 		 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
731 	WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);
732 
733 	data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
734 	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
735 		  | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
736 		  | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
737 		  | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
738 		  | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
739 		  | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
740 		  | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
741 		  | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
742 		  | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
743 		  | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
744 	WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
745 }
746 
747 static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
748 		uint8_t sram_sel, uint8_t indirect)
749 {
750 	struct amdgpu_device *adev = vinst->adev;
751 	int inst_idx = vinst->inst;
752 	uint32_t reg_data = 0;
753 
754 	/* enable sw clock gating control */
755 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
756 		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
757 	else
758 		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
759 	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
760 	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
761 	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
762 		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
763 		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
764 		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
765 		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
766 		 UVD_CGC_CTRL__SYS_MODE_MASK |
767 		 UVD_CGC_CTRL__UDEC_MODE_MASK |
768 		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
769 		 UVD_CGC_CTRL__REGS_MODE_MASK |
770 		 UVD_CGC_CTRL__RBC_MODE_MASK |
771 		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
772 		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
773 		 UVD_CGC_CTRL__IDCT_MODE_MASK |
774 		 UVD_CGC_CTRL__MPRD_MODE_MASK |
775 		 UVD_CGC_CTRL__MPC_MODE_MASK |
776 		 UVD_CGC_CTRL__LBSI_MODE_MASK |
777 		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
778 		 UVD_CGC_CTRL__WCB_MODE_MASK |
779 		 UVD_CGC_CTRL__VCPU_MODE_MASK |
780 		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
781 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
782 		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
783 
784 	/* turn off clock gating */
785 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
786 		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
787 
788 	/* turn on SUVD clock gating */
789 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
790 		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
791 
792 	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
793 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
794 		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
795 }
796 
797 /**
798  * vcn_v2_5_enable_clock_gating - enable VCN clock gating
799  *
800  * @vinst: VCN instance
801  *
802  * Enable clock gating for VCN block
803  */
804 static void vcn_v2_5_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
805 {
806 	struct amdgpu_device *adev = vinst->adev;
807 	int i = vinst->inst;
808 	uint32_t data = 0;
809 
810 	if (adev->vcn.harvest_config & (1 << i))
811 		return;
812 	/* enable UVD CGC */
813 	data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
814 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
815 		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
816 	else
817 		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
818 	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
819 	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
820 	WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
821 
822 	data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
823 	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
824 		 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
825 		 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
826 		 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
827 		 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
828 		 | UVD_CGC_CTRL__SYS_MODE_MASK
829 		 | UVD_CGC_CTRL__UDEC_MODE_MASK
830 		 | UVD_CGC_CTRL__MPEG2_MODE_MASK
831 		 | UVD_CGC_CTRL__REGS_MODE_MASK
832 		 | UVD_CGC_CTRL__RBC_MODE_MASK
833 		 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
834 		 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
835 		 | UVD_CGC_CTRL__IDCT_MODE_MASK
836 		 | UVD_CGC_CTRL__MPRD_MODE_MASK
837 		 | UVD_CGC_CTRL__MPC_MODE_MASK
838 		 | UVD_CGC_CTRL__LBSI_MODE_MASK
839 		 | UVD_CGC_CTRL__LRBBM_MODE_MASK
840 		 | UVD_CGC_CTRL__WCB_MODE_MASK
841 		 | UVD_CGC_CTRL__VCPU_MODE_MASK);
842 	WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
843 
844 	data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
845 	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
846 		 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
847 		 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
848 		 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
849 		 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
850 		 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
851 		 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
852 		 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
853 		 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
854 		 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
855 	WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
856 }
857 
858 static void vcn_v2_6_enable_ras(struct amdgpu_vcn_inst *vinst,
859 				bool indirect)
860 {
861 	struct amdgpu_device *adev = vinst->adev;
862 	int inst_idx = vinst->inst;
863 	uint32_t tmp;
864 
865 	if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0))
866 		return;
867 
868 	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
869 	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
870 	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
871 	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
872 	WREG32_SOC15_DPG_MODE(inst_idx,
873 			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmVCN_RAS_CNTL),
874 			      tmp, 0, indirect);
875 
876 	tmp = UVD_VCPU_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
877 	WREG32_SOC15_DPG_MODE(inst_idx,
878 			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_VCPU_INT_EN),
879 			      tmp, 0, indirect);
880 
881 	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
882 	WREG32_SOC15_DPG_MODE(inst_idx,
883 			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_SYS_INT_EN),
884 			      tmp, 0, indirect);
885 }
886 
887 static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
888 {
889 	struct amdgpu_device *adev = vinst->adev;
890 	int inst_idx = vinst->inst;
891 	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
892 	struct amdgpu_ring *ring;
893 	uint32_t rb_bufsz, tmp;
894 
895 	/* disable register anti-hang mechanism */
896 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
897 		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
898 	/* enable dynamic power gating mode */
899 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
900 	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
901 	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
902 	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
903 
904 	if (indirect)
905 		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
906 
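	/*
	 * In indirect mode the programming below is staged into the DPG
	 * scratch SRAM image and committed through PSP at the end
	 * (amdgpu_vcn_psp_update_sram) instead of being written via MMIO.
	 */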
907 	/* enable clock gating */
908 	vcn_v2_5_clock_gating_dpg_mode(vinst, 0, indirect);
909 
910 	/* enable VCPU clock */
911 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
912 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
913 	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
914 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
915 		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
916 
917 	/* disable master interrupt */
918 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
919 		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
920 
921 	/* setup mmUVD_LMI_CTRL */
922 	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
923 		UVD_LMI_CTRL__REQ_MODE_MASK |
924 		UVD_LMI_CTRL__CRC_RESET_MASK |
925 		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
926 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
927 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
928 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
929 		0x00100000L);
930 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
931 		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
932 
933 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
934 		VCN, 0, mmUVD_MPC_CNTL),
935 		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
936 
937 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
938 		VCN, 0, mmUVD_MPC_SET_MUXA0),
939 		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
940 		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
941 		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
942 		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
943 
944 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
945 		VCN, 0, mmUVD_MPC_SET_MUXB0),
946 		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
947 		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
948 		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
949 		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
950 
951 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
952 		VCN, 0, mmUVD_MPC_SET_MUX),
953 		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
954 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
955 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
956 
957 	vcn_v2_5_mc_resume_dpg_mode(vinst, indirect);
958 
959 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
960 		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
961 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
962 		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
963 
964 	/* enable LMI MC and UMC channels */
965 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
966 		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
967 
968 	vcn_v2_6_enable_ras(vinst, indirect);
969 
970 	/* unblock VCPU register access */
971 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
972 		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
973 
974 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
975 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
976 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
977 		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
978 
979 	/* enable master interrupt */
980 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
981 		VCN, 0, mmUVD_MASTINT_EN),
982 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
983 
984 	if (indirect)
985 		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
986 
987 	ring = &adev->vcn.inst[inst_idx].ring_dec;
988 	/* force RBC into idle state */
989 	rb_bufsz = order_base_2(ring->ring_size);
990 	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
991 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
992 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
993 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
994 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
995 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
996 
997 	/* Stall DPG before WPTR/RPTR reset */
998 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
999 		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
1000 		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1001 	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
1002 
1003 	/* set the write pointer delay */
1004 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
1005 
1006 	/* set the wb address */
1007 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
1008 		(upper_32_bits(ring->gpu_addr) >> 2));
1009 
1010 	/* program the RB_BASE for ring buffer */
1011 	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1012 		lower_32_bits(ring->gpu_addr));
1013 	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1014 		upper_32_bits(ring->gpu_addr));
1015 
1016 	/* Initialize the ring buffer's read and write pointers */
1017 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);
1018 
1019 	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);
1020 
1021 	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
1022 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
1023 		lower_32_bits(ring->wptr));
1024 
1025 	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
1026 	/* Unstall DPG */
1027 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1028 		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1029 
1030 	return 0;
1031 }
1032 
1033 static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
1034 {
1035 	struct amdgpu_device *adev = vinst->adev;
1036 	int i = vinst->inst;
1037 	volatile struct amdgpu_fw_shared *fw_shared =
1038 		adev->vcn.inst[i].fw_shared.cpu_addr;
1039 	struct amdgpu_ring *ring;
1040 	uint32_t rb_bufsz, tmp;
1041 	int j, k, r;
1042 
1043 	if (adev->vcn.harvest_config & (1 << i))
1044 		return 0;
1045 
1046 	if (adev->pm.dpm_enabled)
1047 		amdgpu_dpm_enable_vcn(adev, true, i);
1048 
1049 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1050 		return vcn_v2_5_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
1051 
1052 	/* disable register anti-hang mechanism */
1053 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
1054 		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1055 
1056 	/* set uvd status busy */
1057 	tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
1058 	WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
1059 
1060 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1061 		return 0;
1062 
1063 	/* SW clock gating */
1064 	vcn_v2_5_disable_clock_gating(vinst);
1065 
1066 	/* enable VCPU clock */
1067 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
1068 		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
1069 
1070 	/* disable master interrupt */
1071 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
1072 		 ~UVD_MASTINT_EN__VCPU_EN_MASK);
1073 
1074 	/* setup mmUVD_LMI_CTRL */
1075 	tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
1076 	tmp &= ~0xff;
1077 	WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8 |
1078 		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK	|
1079 		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1080 		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1081 		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
1082 
1083 	/* setup mmUVD_MPC_CNTL */
1084 	tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
1085 	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
1086 	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
1087 	WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
1088 
1089 	/* setup UVD_MPC_SET_MUXA0 */
1090 	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
1091 		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
1092 		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
1093 		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
1094 		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
1095 
1096 	/* setup UVD_MPC_SET_MUXB0 */
1097 	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
1098 		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
1099 		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
1100 		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
1101 		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
1102 
1103 	/* setup mmUVD_MPC_SET_MUX */
1104 	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
1105 		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
1106 		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
1107 		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
1108 
1109 	vcn_v2_5_mc_resume(vinst);
1110 
1111 	/* VCN global tiling registers */
1112 	WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
1113 		     adev->gfx.config.gb_addr_config);
1114 	WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
1115 		     adev->gfx.config.gb_addr_config);
1116 
1117 	/* enable LMI MC and UMC channels */
1118 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
1119 		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1120 
1121 	/* unblock VCPU register access */
1122 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
1123 		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1124 
1125 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
1126 		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1127 
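	/*
	 * Wait for the VCPU to come up: poll UVD_STATUS for the VCPU report
	 * bit (0x2) and, if it does not appear, retry with a block reset,
	 * up to 10 attempts of 100 polls each.
	 */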
1128 	for (k = 0; k < 10; ++k) {
1129 		uint32_t status;
1130 
1131 		for (j = 0; j < 100; ++j) {
1132 			status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
1133 			if (status & 2)
1134 				break;
1135 			if (amdgpu_emu_mode == 1)
1136 				msleep(500);
1137 			else
1138 				mdelay(10);
1139 		}
1140 		r = 0;
1141 		if (status & 2)
1142 			break;
1143 
1144 		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
1145 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
1146 			 UVD_VCPU_CNTL__BLK_RST_MASK,
1147 			 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1148 		mdelay(10);
1149 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
1150 			 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1151 
1152 		mdelay(10);
1153 		r = -1;
1154 	}
1155 
1156 	if (r) {
1157 		DRM_ERROR("VCN decode not responding, giving up!!!\n");
1158 		return r;
1159 	}
1160 
1161 	/* enable master interrupt */
1162 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
1163 		 UVD_MASTINT_EN__VCPU_EN_MASK,
1164 		 ~UVD_MASTINT_EN__VCPU_EN_MASK);
1165 
1166 	/* clear the busy bit of VCN_STATUS */
1167 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
1168 		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1169 
1170 	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
1171 
1172 	ring = &adev->vcn.inst[i].ring_dec;
1173 	/* force RBC into idle state */
1174 	rb_bufsz = order_base_2(ring->ring_size);
1175 	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1176 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1177 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1178 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1179 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1180 	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
1181 
1182 	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
1183 	/* program the RB_BASE for ring buffer */
1184 	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1185 		     lower_32_bits(ring->gpu_addr));
1186 	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1187 		     upper_32_bits(ring->gpu_addr));
1188 
1189 	/* Initialize the ring buffer's read and write pointers */
1190 	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
1191 
1192 	ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
1193 	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
1194 		     lower_32_bits(ring->wptr));
1195 	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
1196 
1197 	fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
1198 	ring = &adev->vcn.inst[i].ring_enc[0];
1199 	WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1200 	WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1201 	WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
1202 	WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1203 	WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
1204 	fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
1205 
1206 	fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
1207 	ring = &adev->vcn.inst[i].ring_enc[1];
1208 	WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1209 	WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1210 	WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1211 	WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1212 	WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
1213 	fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
1214 
1215 	return 0;
1216 }
1217 
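/*
 * Hand the init table built by vcn_v2_5_sriov_start() to the MMSCH firmware:
 * publish its GPU address, VMID and size, then kick off initialization via
 * the mailbox and poll for completion.
 */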
1218 static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
1219 				struct amdgpu_mm_table *table)
1220 {
1221 	uint32_t data = 0, loop = 0, size = 0;
1222 	uint64_t addr = table->gpu_addr;
1223 	struct mmsch_v1_1_init_header *header = NULL;
1224 
1225 	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
1226 	size = header->total_size;
1227 
1228 	/*
1229 	 * 1, write the GPU mc address of the memory descriptor to the
1230 	 *  MMSCH_VF_CTX_ADDR_LO/HI registers
1231 	 */
1232 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
1233 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
1234 
1235 	/* 2, update vmid of descriptor */
1236 	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
1237 	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
1238 	/* use domain0 for MM scheduler */
1239 	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
1240 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);
1241 
1242 	/* 3, notify mmsch about the size of this descriptor */
1243 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
1244 
1245 	/* 4, set resp to zero */
1246 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
1247 
1248 	/*
1249 	 * 5, kick off the initialization and wait until
1250 	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
1251 	 */
1252 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
1253 
1254 	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
1255 	loop = 10;
1256 	while ((data & 0x10000002) != 0x10000002) {
1257 		udelay(100);
1258 		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
1259 		loop--;
1260 		if (!loop)
1261 			break;
1262 	}
1263 
1264 	if (!loop) {
1265 		dev_err(adev->dev,
1266 			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
1267 			data);
1268 		return -EBUSY;
1269 	}
1270 
1271 	return 0;
1272 }
1273 
1274 static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
1275 {
1276 	struct amdgpu_ring *ring;
1277 	uint32_t offset, size, tmp, i, rb_bufsz;
1278 	uint32_t table_size = 0;
1279 	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
1280 	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
1281 	struct mmsch_v1_0_cmd_end end = { { 0 } };
1282 	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
1283 	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
1284 
1285 	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
1286 	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
1287 	end.cmd_header.command_type = MMSCH_COMMAND__END;
1288 
1289 	header->version = MMSCH_VERSION;
1290 	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
1291 	init_table += header->total_size;
1292 
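	/*
	 * Build a per-instance table of direct register writes that mirrors
	 * the bare-metal mc_resume/start programming; MMSCH replays it on
	 * behalf of the VF.
	 */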
1293 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1294 		header->eng[i].table_offset = header->total_size;
1295 		header->eng[i].init_status = 0;
1296 		header->eng[i].table_size = 0;
1297 
1298 		table_size = 0;
1299 
1300 		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
1301 			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
1302 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
1303 
1304 		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
1305 		/* mc resume */
1306 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1307 			MMSCH_V1_0_INSERT_DIRECT_WT(
1308 				SOC15_REG_OFFSET(VCN, i,
1309 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1310 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
1311 			MMSCH_V1_0_INSERT_DIRECT_WT(
1312 				SOC15_REG_OFFSET(VCN, i,
1313 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1314 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
1315 			offset = 0;
1316 			MMSCH_V1_0_INSERT_DIRECT_WT(
1317 				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
1318 		} else {
1319 			MMSCH_V1_0_INSERT_DIRECT_WT(
1320 				SOC15_REG_OFFSET(VCN, i,
1321 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1322 				lower_32_bits(adev->vcn.inst[i].gpu_addr));
1323 			MMSCH_V1_0_INSERT_DIRECT_WT(
1324 				SOC15_REG_OFFSET(VCN, i,
1325 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1326 				upper_32_bits(adev->vcn.inst[i].gpu_addr));
1327 			offset = size;
1328 			MMSCH_V1_0_INSERT_DIRECT_WT(
1329 				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
1330 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
1331 		}
1332 
1333 		MMSCH_V1_0_INSERT_DIRECT_WT(
1334 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
1335 			size);
1336 		MMSCH_V1_0_INSERT_DIRECT_WT(
1337 			SOC15_REG_OFFSET(VCN, i,
1338 				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
1339 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
1340 		MMSCH_V1_0_INSERT_DIRECT_WT(
1341 			SOC15_REG_OFFSET(VCN, i,
1342 				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
1343 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
1344 		MMSCH_V1_0_INSERT_DIRECT_WT(
1345 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
1346 			0);
1347 		MMSCH_V1_0_INSERT_DIRECT_WT(
1348 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
1349 			AMDGPU_VCN_STACK_SIZE);
1350 		MMSCH_V1_0_INSERT_DIRECT_WT(
1351 			SOC15_REG_OFFSET(VCN, i,
1352 				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
1353 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
1354 				AMDGPU_VCN_STACK_SIZE));
1355 		MMSCH_V1_0_INSERT_DIRECT_WT(
1356 			SOC15_REG_OFFSET(VCN, i,
1357 				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
1358 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
1359 				AMDGPU_VCN_STACK_SIZE));
1360 		MMSCH_V1_0_INSERT_DIRECT_WT(
1361 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
1362 			0);
1363 		MMSCH_V1_0_INSERT_DIRECT_WT(
1364 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
1365 			AMDGPU_VCN_CONTEXT_SIZE);
1366 
1367 		ring = &adev->vcn.inst[i].ring_enc[0];
1368 		ring->wptr = 0;
1369 
1370 		MMSCH_V1_0_INSERT_DIRECT_WT(
1371 			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
1372 			lower_32_bits(ring->gpu_addr));
1373 		MMSCH_V1_0_INSERT_DIRECT_WT(
1374 			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
1375 			upper_32_bits(ring->gpu_addr));
1376 		MMSCH_V1_0_INSERT_DIRECT_WT(
1377 			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
1378 			ring->ring_size / 4);
1379 
1380 		ring = &adev->vcn.inst[i].ring_dec;
1381 		ring->wptr = 0;
1382 		MMSCH_V1_0_INSERT_DIRECT_WT(
1383 			SOC15_REG_OFFSET(VCN, i,
1384 				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
1385 			lower_32_bits(ring->gpu_addr));
1386 		MMSCH_V1_0_INSERT_DIRECT_WT(
1387 			SOC15_REG_OFFSET(VCN, i,
1388 				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
1389 			upper_32_bits(ring->gpu_addr));
1390 
1391 		/* force RBC into idle state */
1392 		rb_bufsz = order_base_2(ring->ring_size);
1393 		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1394 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1395 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1396 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1397 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1398 		MMSCH_V1_0_INSERT_DIRECT_WT(
1399 			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);
1400 
1401 		/* add end packet */
1402 		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
1403 		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
1404 		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;
1405 
1406 		/* refine header */
1407 		header->eng[i].table_size = table_size;
1408 		header->total_size += table_size;
1409 	}
1410 
1411 	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
1412 }
1413 
1414 static int vcn_v2_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
1415 {
1416 	struct amdgpu_device *adev = vinst->adev;
1417 	int inst_idx = vinst->inst;
1418 	uint32_t tmp;
1419 
1420 	/* Wait for power status to be 1 */
1421 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
1422 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1423 
1424 	/* wait for read ptr to be equal to write ptr */
1425 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
1426 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1427 
1428 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
1429 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
1430 
1431 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
1432 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
1433 
1434 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
1435 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1436 
1437 	/* disable dynamic power gating mode */
1438 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
1439 			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1440 
1441 	return 0;
1442 }
1443 
1444 static int vcn_v2_5_stop(struct amdgpu_vcn_inst *vinst)
1445 {
1446 	struct amdgpu_device *adev = vinst->adev;
1447 	int i = vinst->inst;
1448 	uint32_t tmp;
1449 	int r;
1450 
1451 	if (adev->vcn.harvest_config & (1 << i))
1452 		return 0;
1453 
1454 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1455 		r = vcn_v2_5_stop_dpg_mode(vinst);
1456 		goto done;
1457 	}
1458 
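	/*
	 * Teardown order: wait for the engine to go idle and LMI traffic to
	 * drain, stall the UMC channel, block VCPU register access, then
	 * reset the VCPU, disable its clock and re-enable clock gating.
	 */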
1459 	/* wait for vcn idle */
1460 	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
1461 	if (r)
1462 		goto done;
1463 
1464 	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1465 		UVD_LMI_STATUS__READ_CLEAN_MASK |
1466 		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1467 		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1468 	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
1469 	if (r)
1470 		goto done;
1471 
1472 	/* block LMI UMC channel */
1473 	tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
1474 	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
1475 	WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
1476 
1477 	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
1478 		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1479 	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
1480 	if (r)
1481 		goto done;
1482 
1483 	/* block VCPU register access */
1484 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
1485 		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
1486 		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1487 
1488 	/* reset VCPU */
1489 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
1490 		 UVD_VCPU_CNTL__BLK_RST_MASK,
1491 		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1492 
1493 	/* disable VCPU clock */
1494 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
1495 		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));
1496 
1497 	/* clear status */
1498 	WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
1499 
1500 	vcn_v2_5_enable_clock_gating(vinst);
1501 
1502 	/* enable register anti-hang mechanism */
1503 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
1504 		 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
1505 		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1506 
1507 done:
1508 	if (adev->pm.dpm_enabled)
1509 		amdgpu_dpm_enable_vcn(adev, false, i);
1510 
1511 	return r;
1512 }
1513 
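/**
 * vcn_v2_5_pause_dpg_mode - pause or unpause dpg mode
 *
 * @vinst: VCN instance
 * @new_state: requested pause state
 *
 * Program the dpg pause request and, when entering pause, stall dpg
 * while the encode ring pointers are reset so that firmware and
 * driver stay in sync.
 */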
1514 static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
1515 				   struct dpg_pause_state *new_state)
1516 {
1517 	struct amdgpu_device *adev = vinst->adev;
1518 	int inst_idx = vinst->inst;
1519 	struct amdgpu_ring *ring;
1520 	uint32_t reg_data = 0;
1521 	int ret_code = 0;
1522 
1523 	/* pause/unpause if state is changed */
1524 	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1525 		DRM_DEBUG("dpg pause state changed %d -> %d\n",
1526 			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
1527 		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
1528 			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1529 
1530 		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1531 			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
1532 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1533 
1534 			if (!ret_code) {
1535 				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
1536 
1537 				/* pause DPG */
1538 				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1539 				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1540 
1541 				/* wait for ACK */
1542 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
1543 					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1544 					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1545 
1546 				/* Stall DPG before WPTR/RPTR reset */
1547 				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1548 					   UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
1549 					   ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1550 
1551 				/* Restore */
1552 				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
1553 				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
1554 				ring->wptr = 0;
1555 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
1556 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1557 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
1558 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1559 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1560 				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
1561 
1562 				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
1563 				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
1564 				ring->wptr = 0;
1565 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1566 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1567 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
1568 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1569 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1570 				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
1571 
1572 				/* Unstall DPG */
1573 				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1574 					   0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1575 
1576 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
1577 					   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1578 			}
1579 		} else {
1580 			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1581 			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1582 			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
1583 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1584 		}
1585 		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1586 	}
1587 
1588 	return 0;
1589 }
1590 
1591 /**
1592  * vcn_v2_5_dec_ring_get_rptr - get read pointer
1593  *
1594  * @ring: amdgpu_ring pointer
1595  *
1596  * Returns the current hardware read pointer
1597  */
1598 static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
1599 {
1600 	struct amdgpu_device *adev = ring->adev;
1601 
1602 	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
1603 }
1604 
1605 /**
1606  * vcn_v2_5_dec_ring_get_wptr - get write pointer
1607  *
1608  * @ring: amdgpu_ring pointer
1609  *
1610  * Returns the current hardware write pointer
1611  */
1612 static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
1613 {
1614 	struct amdgpu_device *adev = ring->adev;
1615 
1616 	if (ring->use_doorbell)
1617 		return *ring->wptr_cpu_addr;
1618 	else
1619 		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
1620 }
1621 
1622 /**
1623  * vcn_v2_5_dec_ring_set_wptr - set write pointer
1624  *
1625  * @ring: amdgpu_ring pointer
1626  *
1627  * Commits the write pointer to the hardware
1628  */
1629 static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
1630 {
1631 	struct amdgpu_device *adev = ring->adev;
1632 
1633 	if (ring->use_doorbell) {
1634 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1635 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1636 	} else {
1637 		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1638 	}
1639 }
1640 
1641 static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
1642 	.type = AMDGPU_RING_TYPE_VCN_DEC,
1643 	.align_mask = 0xf,
1644 	.secure_submission_supported = true,
1645 	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
1646 	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
1647 	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
1648 	.emit_frame_size =
1649 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1650 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1651 		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
1652 		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
1653 		6,
1654 	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1655 	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
1656 	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
1657 	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1658 	.test_ring = vcn_v2_0_dec_ring_test_ring,
1659 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
1660 	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
1661 	.insert_start = vcn_v2_0_dec_ring_insert_start,
1662 	.insert_end = vcn_v2_0_dec_ring_insert_end,
1663 	.pad_ib = amdgpu_ring_generic_pad_ib,
1664 	.begin_use = amdgpu_vcn_ring_begin_use,
1665 	.end_use = amdgpu_vcn_ring_end_use,
1666 	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1667 	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1668 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1669 };
1670 
1671 /**
1672  * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
1673  *
1674  * @ring: amdgpu_ring pointer
1675  *
1676  * Returns the current hardware enc read pointer
1677  */
1678 static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
1679 {
1680 	struct amdgpu_device *adev = ring->adev;
1681 
1682 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
1683 		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
1684 	else
1685 		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
1686 }
1687 
1688 /**
1689  * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
1690  *
1691  * @ring: amdgpu_ring pointer
1692  *
1693  * Returns the current hardware enc write pointer
1694  */
1695 static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
1696 {
1697 	struct amdgpu_device *adev = ring->adev;
1698 
1699 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1700 		if (ring->use_doorbell)
1701 			return *ring->wptr_cpu_addr;
1702 		else
1703 			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
1704 	} else {
1705 		if (ring->use_doorbell)
1706 			return *ring->wptr_cpu_addr;
1707 		else
1708 			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
1709 	}
1710 }
1711 
1712 /**
1713  * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
1714  *
1715  * @ring: amdgpu_ring pointer
1716  *
1717  * Commits the enc write pointer to the hardware
1718  */
1719 static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
1720 {
1721 	struct amdgpu_device *adev = ring->adev;
1722 
1723 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1724 		if (ring->use_doorbell) {
1725 			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1726 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1727 		} else {
1728 			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1729 		}
1730 	} else {
1731 		if (ring->use_doorbell) {
1732 			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1733 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1734 		} else {
1735 			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1736 		}
1737 	}
1738 }
1739 
1740 static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
1741 	.type = AMDGPU_RING_TYPE_VCN_ENC,
1742 	.align_mask = 0x3f,
1743 	.nop = VCN_ENC_CMD_NO_OP,
1744 	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
1745 	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
1746 	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
1747 	.emit_frame_size =
1748 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1749 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1750 		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1751 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1752 		1, /* vcn_v2_0_enc_ring_insert_end */
1753 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1754 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
1755 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
1756 	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
1757 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
1758 	.test_ib = amdgpu_vcn_enc_ring_test_ib,
1759 	.insert_nop = amdgpu_ring_insert_nop,
1760 	.insert_end = vcn_v2_0_enc_ring_insert_end,
1761 	.pad_ib = amdgpu_ring_generic_pad_ib,
1762 	.begin_use = amdgpu_vcn_ring_begin_use,
1763 	.end_use = amdgpu_vcn_ring_end_use,
1764 	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
1765 	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
1766 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1767 };
1768 
1769 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
1770 {
1771 	int i;
1772 
1773 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1774 		if (adev->vcn.harvest_config & (1 << i))
1775 			continue;
1776 		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
1777 		adev->vcn.inst[i].ring_dec.me = i;
1778 	}
1779 }
1780 
1781 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
1782 {
1783 	int i, j;
1784 
1785 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
1786 		if (adev->vcn.harvest_config & (1 << j))
1787 			continue;
1788 		for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
1789 			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
1790 			adev->vcn.inst[j].ring_enc[i].me = j;
1791 		}
1792 	}
1793 }
1794 
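/**
 * vcn_v2_5_is_idle - check VCN block idle status
 *
 * @ip_block: pointer to the amdgpu_ip_block for this hw instance
 *
 * Returns true only when every non-harvested instance reports
 * UVD_STATUS idle.
 */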
1795 static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
1796 {
1797 	struct amdgpu_device *adev = ip_block->adev;
1798 	int i, ret = 1;
1799 
1800 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1801 		if (adev->vcn.harvest_config & (1 << i))
1802 			continue;
1803 
1804 		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
1805 	}
1806 
1807 	return ret;
1808 }
1809 
1810 static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
1811 {
1812 	struct amdgpu_device *adev = ip_block->adev;
1813 	int i, ret = 0;
1814 
1815 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1816 		if (adev->vcn.harvest_config & (1 << i))
1817 			continue;
1818 		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
1819 			UVD_STATUS__IDLE);
1820 		if (ret)
1821 			return ret;
1822 	}
1823 
1824 	return ret;
1825 }
1826 
1827 static int vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1828 					  enum amd_clockgating_state state)
1829 {
1830 	struct amdgpu_device *adev = ip_block->adev;
1831 	bool enable = (state == AMD_CG_STATE_GATE);
1832 	int i;
1833 
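	/* clock gating is left to the host when running as an SR-IOV VF */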
1834 	if (amdgpu_sriov_vf(adev))
1835 		return 0;
1836 
1837 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1838 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
1839 
1840 		if (enable) {
1841 			if (!vcn_v2_5_is_idle(ip_block))
1842 				return -EBUSY;
1843 			vcn_v2_5_enable_clock_gating(vinst);
1844 		} else {
1845 			vcn_v2_5_disable_clock_gating(vinst);
1846 		}
1847 	}
1848 
1849 	return 0;
1850 }
1851 
1852 static int vcn_v2_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
1853 				 enum amd_powergating_state state)
1854 {
1855 	struct amdgpu_device *adev = vinst->adev;
1856 	int ret;
1857 
1858 	if (amdgpu_sriov_vf(adev))
1859 		return 0;
1860 
1861 	if (state == vinst->cur_state)
1862 		return 0;
1863 
1864 	if (state == AMD_PG_STATE_GATE)
1865 		ret = vcn_v2_5_stop(vinst);
1866 	else
1867 		ret = vcn_v2_5_start(vinst);
1868 
1869 	if (!ret)
1870 		vinst->cur_state = state;
1871 
1872 	return ret;
1873 }
1874 
1875 static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
1876 					struct amdgpu_irq_src *source,
1877 					unsigned int type,
1878 					enum amdgpu_interrupt_state state)
1879 {
1880 	return 0;
1881 }
1882 
1883 static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
1884 					struct amdgpu_irq_src *source,
1885 					unsigned int type,
1886 					enum amdgpu_interrupt_state state)
1887 {
1888 	return 0;
1889 }
1890 
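/**
 * vcn_v2_5_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source structure
 * @entry: interrupt vector ring entry
 *
 * Route the interrupt to the fence handler of the decode or encode
 * ring selected by the client and source IDs.
 */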
1891 static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
1892 				      struct amdgpu_irq_src *source,
1893 				      struct amdgpu_iv_entry *entry)
1894 {
1895 	uint32_t ip_instance;
1896 
1897 	switch (entry->client_id) {
1898 	case SOC15_IH_CLIENTID_VCN:
1899 		ip_instance = 0;
1900 		break;
1901 	case SOC15_IH_CLIENTID_VCN1:
1902 		ip_instance = 1;
1903 		break;
1904 	default:
1905 		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1906 		return 0;
1907 	}
1908 
1909 	DRM_DEBUG("IH: VCN TRAP\n");
1910 
1911 	switch (entry->src_id) {
1912 	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
1913 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
1914 		break;
1915 	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1916 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
1917 		break;
1918 	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
1919 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
1920 		break;
1921 	default:
1922 		DRM_ERROR("Unhandled interrupt: %d %d\n",
1923 			  entry->src_id, entry->src_data[0]);
1924 		break;
1925 	}
1926 
1927 	return 0;
1928 }
1929 
1930 static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
1931 	.set = vcn_v2_5_set_interrupt_state,
1932 	.process = vcn_v2_5_process_interrupt,
1933 };
1934 
1935 static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
1936 	.set = vcn_v2_6_set_ras_interrupt_state,
1937 	.process = amdgpu_vcn_process_poison_irq,
1938 };
1939 
1940 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
1941 {
1942 	int i;
1943 
1944 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1945 		if (adev->vcn.harvest_config & (1 << i))
1946 			continue;
1947 		adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
1948 		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
1949 
1950 		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
1951 		adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
1952 	}
1953 }
1954 
1955 static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
1956 {
1957 	struct amdgpu_device *adev = ip_block->adev;
1958 	int i, j;
1959 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
1960 	uint32_t inst_off, is_powered;
1961 
1962 	if (!adev->vcn.ip_dump)
1963 		return;
1964 
1965 	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
1966 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1967 		if (adev->vcn.harvest_config & (1 << i)) {
1968 			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
1969 			continue;
1970 		}
1971 
1972 		inst_off = i * reg_count;
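		/* an instance is treated as powered unless the power status field reads 1 */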
1973 		is_powered = (adev->vcn.ip_dump[inst_off] &
1974 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
1975 
1976 		if (is_powered) {
1977 			drm_printf(p, "\nActive Instance:VCN%d\n", i);
1978 			for (j = 0; j < reg_count; j++)
1979 				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
1980 					   adev->vcn.ip_dump[inst_off + j]);
1981 		} else {
1982 			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
1983 		}
1984 	}
1985 }
1986 
1987 static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
1988 {
1989 	struct amdgpu_device *adev = ip_block->adev;
1990 	int i, j;
1991 	bool is_powered;
1992 	uint32_t inst_off;
1993 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
1994 
1995 	if (!adev->vcn.ip_dump)
1996 		return;
1997 
1998 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1999 		if (adev->vcn.harvest_config & (1 << i))
2000 			continue;
2001 
2002 		inst_off = i * reg_count;
2003 		/* mmUVD_POWER_STATUS is always readable and is the first element of the array */
2004 		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
2005 		is_powered = (adev->vcn.ip_dump[inst_off] &
2006 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
2007 
2008 		if (is_powered)
2009 			for (j = 1; j < reg_count; j++)
2010 				adev->vcn.ip_dump[inst_off + j] =
2011 					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i));
2012 	}
2013 }
2014 
2015 static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
2016 	.name = "vcn_v2_5",
2017 	.early_init = vcn_v2_5_early_init,
2018 	.sw_init = vcn_v2_5_sw_init,
2019 	.sw_fini = vcn_v2_5_sw_fini,
2020 	.hw_init = vcn_v2_5_hw_init,
2021 	.hw_fini = vcn_v2_5_hw_fini,
2022 	.suspend = vcn_v2_5_suspend,
2023 	.resume = vcn_v2_5_resume,
2024 	.is_idle = vcn_v2_5_is_idle,
2025 	.wait_for_idle = vcn_v2_5_wait_for_idle,
2026 	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
2027 	.set_powergating_state = vcn_set_powergating_state,
2028 	.dump_ip_state = vcn_v2_5_dump_ip_state,
2029 	.print_ip_state = vcn_v2_5_print_ip_state,
2030 };
2031 
2032 static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
2033 	.name = "vcn_v2_6",
2034 	.early_init = vcn_v2_5_early_init,
2035 	.sw_init = vcn_v2_5_sw_init,
2036 	.sw_fini = vcn_v2_5_sw_fini,
2037 	.hw_init = vcn_v2_5_hw_init,
2038 	.hw_fini = vcn_v2_5_hw_fini,
2039 	.suspend = vcn_v2_5_suspend,
2040 	.resume = vcn_v2_5_resume,
2041 	.is_idle = vcn_v2_5_is_idle,
2042 	.wait_for_idle = vcn_v2_5_wait_for_idle,
2043 	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
2044 	.set_powergating_state = vcn_set_powergating_state,
2045 	.dump_ip_state = vcn_v2_5_dump_ip_state,
2046 	.print_ip_state = vcn_v2_5_print_ip_state,
2047 };
2048 
2049 const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
2050 {
2051 	.type = AMD_IP_BLOCK_TYPE_VCN,
2052 	.major = 2,
2053 	.minor = 5,
2054 	.rev = 0,
2055 	.funcs = &vcn_v2_5_ip_funcs,
2056 };
2057 
2058 const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
2059 {
2060 	.type = AMD_IP_BLOCK_TYPE_VCN,
2061 	.major = 2,
2062 	.minor = 6,
2063 	.rev = 0,
2064 	.funcs = &vcn_v2_6_ip_funcs,
2065 };
2066 
2067 static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
2068 			uint32_t instance, uint32_t sub_block)
2069 {
2070 	uint32_t poison_stat = 0, reg_value = 0;
2071 
2072 	switch (sub_block) {
2073 	case AMDGPU_VCN_V2_6_VCPU_VCODEC:
2074 		reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
2075 		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
2076 		break;
2077 	default:
2078 		break;
2079 	}
2080 
2081 	if (poison_stat)
2082 		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
2083 			instance, sub_block);
2084 
2085 	return poison_stat;
2086 }
2087 
2088 static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
2089 {
2090 	uint32_t inst, sub;
2091 	uint32_t poison_stat = 0;
2092 
2093 	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
2094 		for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
2095 			poison_stat +=
2096 			vcn_v2_6_query_poison_by_instance(adev, inst, sub);
2097 
2098 	return !!poison_stat;
2099 }
2100 
2101 const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
2102 	.query_poison_status = vcn_v2_6_query_poison_status,
2103 };
2104 
2105 static struct amdgpu_vcn_ras vcn_v2_6_ras = {
2106 	.ras_block = {
2107 		.hw_ops = &vcn_v2_6_ras_hw_ops,
2108 		.ras_late_init = amdgpu_vcn_ras_late_init,
2109 	},
2110 };
2111 
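/* only VCN 2.6 hooks up the RAS poison query */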
2112 static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
2113 {
2114 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2115 	case IP_VERSION(2, 6, 0):
2116 		adev->vcn.ras = &vcn_v2_6_ras;
2117 		break;
2118 	default:
2119 		break;
2120 	}
2121 }
2122