xref: /linux/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include <drm/drm_drv.h>
26 
27 #include "amdgpu.h"
28 #include "amdgpu_vcn.h"
29 #include "amdgpu_pm.h"
30 #include "soc15.h"
31 #include "soc15d.h"
32 #include "vcn_v2_0.h"
33 #include "mmsch_v1_0.h"
34 #include "vcn_v2_5.h"
35 
36 #include "vcn/vcn_2_5_offset.h"
37 #include "vcn/vcn_2_5_sh_mask.h"
38 #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
39 
40 #define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
41 #define VCN1_VID_SOC_ADDRESS_3_0				0x48200
42 
43 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
44 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
45 #define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
46 #define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
47 #define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
48 #define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
49 #define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d
50 
51 #define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
52 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
53 #define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
54 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c
55 
56 #define VCN25_MAX_HW_INSTANCES_ARCTURUS			2
57 
58 static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
59 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
60 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
61 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
62 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
63 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
64 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
65 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
66 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
67 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
68 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
69 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
70 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
71 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
72 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
73 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
74 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
75 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
76 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
77 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
78 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
79 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
80 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
81 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
82 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
83 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
84 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
85 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
86 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
87 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
88 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
89 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
90 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
91 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
92 	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
93 };
94 
95 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
96 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
97 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
98 static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
99 				enum amd_powergating_state state);
100 static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
101 				int inst_idx, struct dpg_pause_state *new_state);
102 static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
103 static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
104 
105 static int amdgpu_ih_clientid_vcns[] = {
106 	SOC15_IH_CLIENTID_VCN,
107 	SOC15_IH_CLIENTID_VCN1
108 };
109 
110 /**
111  * vcn_v2_5_early_init - set function pointers and load microcode
112  *
113  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
114  *
115  * Set ring and irq function pointers
116  * Load microcode from filesystem
117  */
vcn_v2_5_early_init(struct amdgpu_ip_block * ip_block)118 static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
119 {
120 	struct amdgpu_device *adev = ip_block->adev;
121 
122 	if (amdgpu_sriov_vf(adev)) {
123 		adev->vcn.num_vcn_inst = 2;
124 		adev->vcn.harvest_config = 0;
125 		adev->vcn.num_enc_rings = 1;
126 	} else {
127 		u32 harvest;
128 		int i;
129 
130 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
131 			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
132 			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
133 				adev->vcn.harvest_config |= 1 << i;
134 		}
135 		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
136 					AMDGPU_VCN_HARVEST_VCN1))
137 			/* both instances are harvested, disable the block */
138 			return -ENOENT;
139 
140 		adev->vcn.num_enc_rings = 2;
141 	}
142 
143 	vcn_v2_5_set_dec_ring_funcs(adev);
144 	vcn_v2_5_set_enc_ring_funcs(adev);
145 	vcn_v2_5_set_irq_funcs(adev);
146 	vcn_v2_5_set_ras_funcs(adev);
147 
148 	return amdgpu_vcn_early_init(adev);
149 }
150 
151 /**
152  * vcn_v2_5_sw_init - sw init for VCN block
153  *
154  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
155  *
156  * Load firmware and sw initialization
157  */
vcn_v2_5_sw_init(struct amdgpu_ip_block * ip_block)158 static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
159 {
160 	struct amdgpu_ring *ring;
161 	int i, j, r;
162 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
163 	uint32_t *ptr;
164 	struct amdgpu_device *adev = ip_block->adev;
165 
166 	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
167 		if (adev->vcn.harvest_config & (1 << j))
168 			continue;
169 		/* VCN DEC TRAP */
170 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
171 				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
172 		if (r)
173 			return r;
174 
175 		/* VCN ENC TRAP */
176 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
177 			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
178 				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
179 			if (r)
180 				return r;
181 		}
182 
183 		/* VCN POISON TRAP */
184 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
185 			VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
186 		if (r)
187 			return r;
188 	}
189 
190 	r = amdgpu_vcn_sw_init(adev);
191 	if (r)
192 		return r;
193 
194 	amdgpu_vcn_setup_ucode(adev);
195 
196 	r = amdgpu_vcn_resume(adev);
197 	if (r)
198 		return r;
199 
200 	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
201 		volatile struct amdgpu_fw_shared *fw_shared;
202 
203 		if (adev->vcn.harvest_config & (1 << j))
204 			continue;
205 		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
206 		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
207 		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
208 		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
209 		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
210 		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
211 
212 		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
213 		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
214 		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
215 		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
216 		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
217 		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
218 		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
219 		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
220 		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
221 		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
222 
223 		ring = &adev->vcn.inst[j].ring_dec;
224 		ring->use_doorbell = true;
225 
226 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
227 				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
228 
229 		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0))
230 			ring->vm_hub = AMDGPU_MMHUB1(0);
231 		else
232 			ring->vm_hub = AMDGPU_MMHUB0(0);
233 
234 		sprintf(ring->name, "vcn_dec_%d", j);
235 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
236 				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
237 		if (r)
238 			return r;
239 
240 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
241 			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
242 
243 			ring = &adev->vcn.inst[j].ring_enc[i];
244 			ring->use_doorbell = true;
245 
246 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
247 					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
248 
249 			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
250 			    IP_VERSION(2, 5, 0))
251 				ring->vm_hub = AMDGPU_MMHUB1(0);
252 			else
253 				ring->vm_hub = AMDGPU_MMHUB0(0);
254 
255 			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
256 			r = amdgpu_ring_init(adev, ring, 512,
257 					     &adev->vcn.inst[j].irq, 0,
258 					     hw_prio, NULL);
259 			if (r)
260 				return r;
261 		}
262 
263 		fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
264 		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
265 
266 		if (amdgpu_vcnfw_log)
267 			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
268 	}
269 
270 	if (amdgpu_sriov_vf(adev)) {
271 		r = amdgpu_virt_alloc_mm_table(adev);
272 		if (r)
273 			return r;
274 	}
275 
276 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
277 		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
278 
279 	r = amdgpu_vcn_ras_sw_init(adev);
280 	if (r)
281 		return r;
282 
283 	/* Allocate memory for VCN IP Dump buffer */
284 	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
285 	if (!ptr) {
286 		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
287 		adev->vcn.ip_dump = NULL;
288 	} else {
289 		adev->vcn.ip_dump = ptr;
290 	}
291 
292 	return 0;
293 }
294 
295 /**
296  * vcn_v2_5_sw_fini - sw fini for VCN block
297  *
298  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
299  *
300  * VCN suspend and free up sw allocation
301  */
vcn_v2_5_sw_fini(struct amdgpu_ip_block * ip_block)302 static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
303 {
304 	int i, r, idx;
305 	struct amdgpu_device *adev = ip_block->adev;
306 	volatile struct amdgpu_fw_shared *fw_shared;
307 
308 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
309 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
310 			if (adev->vcn.harvest_config & (1 << i))
311 				continue;
312 			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
313 			fw_shared->present_flag_0 = 0;
314 		}
315 		drm_dev_exit(idx);
316 	}
317 
318 
319 	if (amdgpu_sriov_vf(adev))
320 		amdgpu_virt_free_mm_table(adev);
321 
322 	r = amdgpu_vcn_suspend(adev);
323 	if (r)
324 		return r;
325 
326 	r = amdgpu_vcn_sw_fini(adev);
327 
328 	kfree(adev->vcn.ip_dump);
329 
330 	return r;
331 }
332 
333 /**
334  * vcn_v2_5_hw_init - start and test VCN block
335  *
336  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
337  *
338  * Initialize the hardware, boot up the VCPU and do some testing
339  */
vcn_v2_5_hw_init(struct amdgpu_ip_block * ip_block)340 static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
341 {
342 	struct amdgpu_device *adev = ip_block->adev;
343 	struct amdgpu_ring *ring;
344 	int i, j, r = 0;
345 
346 	if (amdgpu_sriov_vf(adev))
347 		r = vcn_v2_5_sriov_start(adev);
348 
349 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
350 		if (adev->vcn.harvest_config & (1 << j))
351 			continue;
352 
353 		if (amdgpu_sriov_vf(adev)) {
354 			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
355 			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
356 			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
357 			adev->vcn.inst[j].ring_dec.sched.ready = true;
358 		} else {
359 
360 			ring = &adev->vcn.inst[j].ring_dec;
361 
362 			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
363 						     ring->doorbell_index, j);
364 
365 			r = amdgpu_ring_test_helper(ring);
366 			if (r)
367 				return r;
368 
369 			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
370 				ring = &adev->vcn.inst[j].ring_enc[i];
371 				r = amdgpu_ring_test_helper(ring);
372 				if (r)
373 					return r;
374 			}
375 		}
376 	}
377 
378 	return r;
379 }
380 
381 /**
382  * vcn_v2_5_hw_fini - stop the hardware block
383  *
384  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
385  *
386  * Stop the VCN block, mark ring as not ready any more
387  */
vcn_v2_5_hw_fini(struct amdgpu_ip_block * ip_block)388 static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
389 {
390 	struct amdgpu_device *adev = ip_block->adev;
391 	int i;
392 
393 	cancel_delayed_work_sync(&adev->vcn.idle_work);
394 
395 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
396 		if (adev->vcn.harvest_config & (1 << i))
397 			continue;
398 
399 		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
400 		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
401 		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
402 			vcn_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
403 
404 		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
405 			amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
406 	}
407 
408 	return 0;
409 }
410 
411 /**
412  * vcn_v2_5_suspend - suspend VCN block
413  *
414  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
415  *
416  * HW fini and suspend VCN block
417  */
vcn_v2_5_suspend(struct amdgpu_ip_block * ip_block)418 static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
419 {
420 	int r;
421 
422 	r = vcn_v2_5_hw_fini(ip_block);
423 	if (r)
424 		return r;
425 
426 	r = amdgpu_vcn_suspend(ip_block->adev);
427 
428 	return r;
429 }
430 
431 /**
432  * vcn_v2_5_resume - resume VCN block
433  *
434  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
435  *
436  * Resume firmware and hw init VCN block
437  */
vcn_v2_5_resume(struct amdgpu_ip_block * ip_block)438 static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
439 {
440 	int r;
441 
442 	r = amdgpu_vcn_resume(ip_block->adev);
443 	if (r)
444 		return r;
445 
446 	r = vcn_v2_5_hw_init(ip_block);
447 
448 	return r;
449 }
450 
451 /**
452  * vcn_v2_5_mc_resume - memory controller programming
453  *
454  * @adev: amdgpu_device pointer
455  *
456  * Let the VCN memory controller know it's offsets
457  */
vcn_v2_5_mc_resume(struct amdgpu_device * adev)458 static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
459 {
460 	uint32_t size;
461 	uint32_t offset;
462 	int i;
463 
464 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
465 		if (adev->vcn.harvest_config & (1 << i))
466 			continue;
467 
468 		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
469 		/* cache window 0: fw */
470 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
471 			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
472 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
473 			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
474 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
475 			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
476 			offset = 0;
477 		} else {
478 			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
479 				lower_32_bits(adev->vcn.inst[i].gpu_addr));
480 			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
481 				upper_32_bits(adev->vcn.inst[i].gpu_addr));
482 			offset = size;
483 			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
484 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
485 		}
486 		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
487 
488 		/* cache window 1: stack */
489 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
490 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
491 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
492 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
493 		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
494 		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
495 
496 		/* cache window 2: context */
497 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
498 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
499 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
500 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
501 		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
502 		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
503 
504 		/* non-cache window */
505 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
506 			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
507 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
508 			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
509 		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
510 		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
511 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
512 	}
513 }
514 
vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device * adev,int inst_idx,bool indirect)515 static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
516 {
517 	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
518 	uint32_t offset;
519 
520 	/* cache window 0: fw */
521 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
522 		if (!indirect) {
523 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
524 				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
525 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
526 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
527 				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
528 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
529 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
530 				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
531 		} else {
532 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
533 				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
534 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
535 				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
536 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
537 				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
538 		}
539 		offset = 0;
540 	} else {
541 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
542 			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
543 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
544 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
545 			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
546 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
547 		offset = size;
548 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
549 			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
550 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
551 	}
552 
553 	if (!indirect)
554 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
555 			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
556 	else
557 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
558 			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
559 
560 	/* cache window 1: stack */
561 	if (!indirect) {
562 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
563 			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
564 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
565 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
566 			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
567 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
568 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
569 			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
570 	} else {
571 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
572 			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
573 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
574 			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
575 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
576 			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
577 	}
578 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
579 		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
580 
581 	/* cache window 2: context */
582 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
583 		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
584 		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
585 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
586 		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
587 		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
588 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
589 		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
590 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
591 		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
592 
593 	/* non-cache window */
594 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
595 		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
596 		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
597 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
598 		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
599 		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
600 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
601 		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
602 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
603 		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
604 		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
605 
606 	/* VCN global tiling registers */
607 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
608 		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
609 }
610 
611 /**
612  * vcn_v2_5_disable_clock_gating - disable VCN clock gating
613  *
614  * @adev: amdgpu_device pointer
615  *
616  * Disable clock gating for VCN block
617  */
vcn_v2_5_disable_clock_gating(struct amdgpu_device * adev)618 static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
619 {
620 	uint32_t data;
621 	int i;
622 
623 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
624 		if (adev->vcn.harvest_config & (1 << i))
625 			continue;
626 		/* UVD disable CGC */
627 		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
628 		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
629 			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
630 		else
631 			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
632 		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
633 		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
634 		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
635 
636 		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
637 		data &= ~(UVD_CGC_GATE__SYS_MASK
638 			| UVD_CGC_GATE__UDEC_MASK
639 			| UVD_CGC_GATE__MPEG2_MASK
640 			| UVD_CGC_GATE__REGS_MASK
641 			| UVD_CGC_GATE__RBC_MASK
642 			| UVD_CGC_GATE__LMI_MC_MASK
643 			| UVD_CGC_GATE__LMI_UMC_MASK
644 			| UVD_CGC_GATE__IDCT_MASK
645 			| UVD_CGC_GATE__MPRD_MASK
646 			| UVD_CGC_GATE__MPC_MASK
647 			| UVD_CGC_GATE__LBSI_MASK
648 			| UVD_CGC_GATE__LRBBM_MASK
649 			| UVD_CGC_GATE__UDEC_RE_MASK
650 			| UVD_CGC_GATE__UDEC_CM_MASK
651 			| UVD_CGC_GATE__UDEC_IT_MASK
652 			| UVD_CGC_GATE__UDEC_DB_MASK
653 			| UVD_CGC_GATE__UDEC_MP_MASK
654 			| UVD_CGC_GATE__WCB_MASK
655 			| UVD_CGC_GATE__VCPU_MASK
656 			| UVD_CGC_GATE__MMSCH_MASK);
657 
658 		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
659 
660 		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0,  0xFFFFFFFF);
661 
662 		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
663 		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
664 			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
665 			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
666 			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
667 			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
668 			| UVD_CGC_CTRL__SYS_MODE_MASK
669 			| UVD_CGC_CTRL__UDEC_MODE_MASK
670 			| UVD_CGC_CTRL__MPEG2_MODE_MASK
671 			| UVD_CGC_CTRL__REGS_MODE_MASK
672 			| UVD_CGC_CTRL__RBC_MODE_MASK
673 			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
674 			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
675 			| UVD_CGC_CTRL__IDCT_MODE_MASK
676 			| UVD_CGC_CTRL__MPRD_MODE_MASK
677 			| UVD_CGC_CTRL__MPC_MODE_MASK
678 			| UVD_CGC_CTRL__LBSI_MODE_MASK
679 			| UVD_CGC_CTRL__LRBBM_MODE_MASK
680 			| UVD_CGC_CTRL__WCB_MODE_MASK
681 			| UVD_CGC_CTRL__VCPU_MODE_MASK
682 			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
683 		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
684 
685 		/* turn on */
686 		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
687 		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
688 			| UVD_SUVD_CGC_GATE__SIT_MASK
689 			| UVD_SUVD_CGC_GATE__SMP_MASK
690 			| UVD_SUVD_CGC_GATE__SCM_MASK
691 			| UVD_SUVD_CGC_GATE__SDB_MASK
692 			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
693 			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
694 			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
695 			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
696 			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
697 			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
698 			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
699 			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
700 			| UVD_SUVD_CGC_GATE__SCLR_MASK
701 			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
702 			| UVD_SUVD_CGC_GATE__ENT_MASK
703 			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
704 			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
705 			| UVD_SUVD_CGC_GATE__SITE_MASK
706 			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
707 			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
708 			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
709 			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
710 			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
711 		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);
712 
713 		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
714 		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
715 			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
716 			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
717 			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
718 			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
719 			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
720 			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
721 			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
722 			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
723 			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
724 		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
725 	}
726 }
727 
vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device * adev,uint8_t sram_sel,int inst_idx,uint8_t indirect)728 static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
729 		uint8_t sram_sel, int inst_idx, uint8_t indirect)
730 {
731 	uint32_t reg_data = 0;
732 
733 	/* enable sw clock gating control */
734 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
735 		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
736 	else
737 		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
738 	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
739 	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
740 	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
741 		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
742 		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
743 		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
744 		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
745 		 UVD_CGC_CTRL__SYS_MODE_MASK |
746 		 UVD_CGC_CTRL__UDEC_MODE_MASK |
747 		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
748 		 UVD_CGC_CTRL__REGS_MODE_MASK |
749 		 UVD_CGC_CTRL__RBC_MODE_MASK |
750 		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
751 		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
752 		 UVD_CGC_CTRL__IDCT_MODE_MASK |
753 		 UVD_CGC_CTRL__MPRD_MODE_MASK |
754 		 UVD_CGC_CTRL__MPC_MODE_MASK |
755 		 UVD_CGC_CTRL__LBSI_MODE_MASK |
756 		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
757 		 UVD_CGC_CTRL__WCB_MODE_MASK |
758 		 UVD_CGC_CTRL__VCPU_MODE_MASK |
759 		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
760 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
761 		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
762 
763 	/* turn off clock gating */
764 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
765 		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
766 
767 	/* turn on SUVD clock gating */
768 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
769 		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
770 
771 	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
772 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
773 		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
774 }
775 
776 /**
777  * vcn_v2_5_enable_clock_gating - enable VCN clock gating
778  *
779  * @adev: amdgpu_device pointer
780  *
781  * Enable clock gating for VCN block
782  */
vcn_v2_5_enable_clock_gating(struct amdgpu_device * adev)783 static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
784 {
785 	uint32_t data = 0;
786 	int i;
787 
788 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
789 		if (adev->vcn.harvest_config & (1 << i))
790 			continue;
791 		/* enable UVD CGC */
792 		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
793 		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
794 			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
795 		else
796 			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
797 		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
798 		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
799 		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
800 
801 		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
802 		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
803 			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
804 			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
805 			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
806 			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
807 			| UVD_CGC_CTRL__SYS_MODE_MASK
808 			| UVD_CGC_CTRL__UDEC_MODE_MASK
809 			| UVD_CGC_CTRL__MPEG2_MODE_MASK
810 			| UVD_CGC_CTRL__REGS_MODE_MASK
811 			| UVD_CGC_CTRL__RBC_MODE_MASK
812 			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
813 			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
814 			| UVD_CGC_CTRL__IDCT_MODE_MASK
815 			| UVD_CGC_CTRL__MPRD_MODE_MASK
816 			| UVD_CGC_CTRL__MPC_MODE_MASK
817 			| UVD_CGC_CTRL__LBSI_MODE_MASK
818 			| UVD_CGC_CTRL__LRBBM_MODE_MASK
819 			| UVD_CGC_CTRL__WCB_MODE_MASK
820 			| UVD_CGC_CTRL__VCPU_MODE_MASK);
821 		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
822 
823 		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
824 		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
825 			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
826 			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
827 			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
828 			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
829 			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
830 			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
831 			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
832 			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
833 			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
834 		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
835 	}
836 }
837 
vcn_v2_6_enable_ras(struct amdgpu_device * adev,int inst_idx,bool indirect)838 static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
839 				bool indirect)
840 {
841 	uint32_t tmp;
842 
843 	if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0))
844 		return;
845 
846 	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
847 	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
848 	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
849 	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
850 	WREG32_SOC15_DPG_MODE(inst_idx,
851 			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmVCN_RAS_CNTL),
852 			      tmp, 0, indirect);
853 
854 	tmp = UVD_VCPU_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
855 	WREG32_SOC15_DPG_MODE(inst_idx,
856 			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_VCPU_INT_EN),
857 			      tmp, 0, indirect);
858 
859 	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
860 	WREG32_SOC15_DPG_MODE(inst_idx,
861 			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_SYS_INT_EN),
862 			      tmp, 0, indirect);
863 }
864 
vcn_v2_5_start_dpg_mode(struct amdgpu_device * adev,int inst_idx,bool indirect)865 static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
866 {
867 	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
868 	struct amdgpu_ring *ring;
869 	uint32_t rb_bufsz, tmp;
870 
871 	/* disable register anti-hang mechanism */
872 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
873 		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
874 	/* enable dynamic power gating mode */
875 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
876 	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
877 	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
878 	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
879 
880 	if (indirect)
881 		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
882 
883 	/* enable clock gating */
884 	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
885 
886 	/* enable VCPU clock */
887 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
888 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
889 	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
890 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
891 		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
892 
893 	/* disable master interupt */
894 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
895 		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
896 
897 	/* setup mmUVD_LMI_CTRL */
898 	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
899 		UVD_LMI_CTRL__REQ_MODE_MASK |
900 		UVD_LMI_CTRL__CRC_RESET_MASK |
901 		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
902 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
903 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
904 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
905 		0x00100000L);
906 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
907 		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
908 
909 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
910 		VCN, 0, mmUVD_MPC_CNTL),
911 		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
912 
913 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
914 		VCN, 0, mmUVD_MPC_SET_MUXA0),
915 		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
916 		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
917 		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
918 		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
919 
920 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
921 		VCN, 0, mmUVD_MPC_SET_MUXB0),
922 		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
923 		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
924 		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
925 		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
926 
927 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
928 		VCN, 0, mmUVD_MPC_SET_MUX),
929 		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
930 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
931 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
932 
933 	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
934 
935 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
936 		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
937 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
938 		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
939 
940 	/* enable LMI MC and UMC channels */
941 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
942 		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
943 
944 	vcn_v2_6_enable_ras(adev, inst_idx, indirect);
945 
946 	/* unblock VCPU register access */
947 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
948 		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
949 
950 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
951 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
952 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
953 		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
954 
955 	/* enable master interrupt */
956 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
957 		VCN, 0, mmUVD_MASTINT_EN),
958 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
959 
960 	if (indirect)
961 		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
962 
963 	ring = &adev->vcn.inst[inst_idx].ring_dec;
964 	/* force RBC into idle state */
965 	rb_bufsz = order_base_2(ring->ring_size);
966 	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
967 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
968 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
969 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
970 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
971 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
972 
973 	/* Stall DPG before WPTR/RPTR reset */
974 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
975 		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
976 		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
977 	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
978 
979 	/* set the write pointer delay */
980 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
981 
982 	/* set the wb address */
983 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
984 		(upper_32_bits(ring->gpu_addr) >> 2));
985 
986 	/* program the RB_BASE for ring buffer */
987 	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
988 		lower_32_bits(ring->gpu_addr));
989 	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
990 		upper_32_bits(ring->gpu_addr));
991 
992 	/* Initialize the ring buffer's read and write pointers */
993 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);
994 
995 	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);
996 
997 	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
998 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
999 		lower_32_bits(ring->wptr));
1000 
1001 	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
1002 	/* Unstall DPG */
1003 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1004 		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1005 
1006 	return 0;
1007 }
1008 
vcn_v2_5_start(struct amdgpu_device * adev)1009 static int vcn_v2_5_start(struct amdgpu_device *adev)
1010 {
1011 	struct amdgpu_ring *ring;
1012 	uint32_t rb_bufsz, tmp;
1013 	int i, j, k, r;
1014 
1015 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1016 		if (adev->pm.dpm_enabled)
1017 			amdgpu_dpm_enable_vcn(adev, true, i);
1018 	}
1019 
1020 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1021 		if (adev->vcn.harvest_config & (1 << i))
1022 			continue;
1023 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1024 			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
1025 			continue;
1026 		}
1027 
1028 		/* disable register anti-hang mechanism */
1029 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
1030 			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1031 
1032 		/* set uvd status busy */
1033 		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
1034 		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
1035 	}
1036 
1037 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1038 		return 0;
1039 
1040 	/*SW clock gating */
1041 	vcn_v2_5_disable_clock_gating(adev);
1042 
1043 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1044 		if (adev->vcn.harvest_config & (1 << i))
1045 			continue;
1046 		/* enable VCPU clock */
1047 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
1048 			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
1049 
1050 		/* disable master interrupt */
1051 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
1052 			~UVD_MASTINT_EN__VCPU_EN_MASK);
1053 
1054 		/* setup mmUVD_LMI_CTRL */
1055 		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
1056 		tmp &= ~0xff;
1057 		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
1058 			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK	|
1059 			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1060 			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1061 			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
1062 
1063 		/* setup mmUVD_MPC_CNTL */
1064 		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
1065 		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
1066 		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
1067 		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
1068 
1069 		/* setup UVD_MPC_SET_MUXA0 */
1070 		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
1071 			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
1072 			(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
1073 			(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
1074 			(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
1075 
1076 		/* setup UVD_MPC_SET_MUXB0 */
1077 		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
1078 			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
1079 			(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
1080 			(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
1081 			(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
1082 
1083 		/* setup mmUVD_MPC_SET_MUX */
1084 		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
1085 			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
1086 			(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
1087 			(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
1088 	}
1089 
1090 	vcn_v2_5_mc_resume(adev);
1091 
1092 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1093 		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1094 		if (adev->vcn.harvest_config & (1 << i))
1095 			continue;
1096 		/* VCN global tiling registers */
1097 		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
1098 			adev->gfx.config.gb_addr_config);
1099 		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
1100 			adev->gfx.config.gb_addr_config);
1101 
1102 		/* enable LMI MC and UMC channels */
1103 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
1104 			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1105 
1106 		/* unblock VCPU register access */
1107 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
1108 			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1109 
1110 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
1111 			~UVD_VCPU_CNTL__BLK_RST_MASK);
1112 
1113 		for (k = 0; k < 10; ++k) {
1114 			uint32_t status;
1115 
1116 			for (j = 0; j < 100; ++j) {
1117 				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
1118 				if (status & 2)
1119 					break;
1120 				if (amdgpu_emu_mode == 1)
1121 					msleep(500);
1122 				else
1123 					mdelay(10);
1124 			}
1125 			r = 0;
1126 			if (status & 2)
1127 				break;
1128 
1129 			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
1130 			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
1131 				UVD_VCPU_CNTL__BLK_RST_MASK,
1132 				~UVD_VCPU_CNTL__BLK_RST_MASK);
1133 			mdelay(10);
1134 			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
1135 				~UVD_VCPU_CNTL__BLK_RST_MASK);
1136 
1137 			mdelay(10);
1138 			r = -1;
1139 		}
1140 
1141 		if (r) {
1142 			DRM_ERROR("VCN decode not responding, giving up!!!\n");
1143 			return r;
1144 		}
1145 
1146 		/* enable master interrupt */
1147 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
1148 			UVD_MASTINT_EN__VCPU_EN_MASK,
1149 			~UVD_MASTINT_EN__VCPU_EN_MASK);
1150 
1151 		/* clear the busy bit of VCN_STATUS */
1152 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
1153 			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1154 
1155 		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
1156 
1157 		ring = &adev->vcn.inst[i].ring_dec;
1158 		/* force RBC into idle state */
1159 		rb_bufsz = order_base_2(ring->ring_size);
1160 		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1161 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1162 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1163 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1164 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1165 		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
1166 
1167 		fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
1168 		/* program the RB_BASE for ring buffer */
1169 		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1170 			lower_32_bits(ring->gpu_addr));
1171 		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1172 			upper_32_bits(ring->gpu_addr));
1173 
1174 		/* Initialize the ring buffer's read and write pointers */
1175 		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
1176 
1177 		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
1178 		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
1179 				lower_32_bits(ring->wptr));
1180 		fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
1181 
1182 		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
1183 		ring = &adev->vcn.inst[i].ring_enc[0];
1184 		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1185 		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1186 		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
1187 		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1188 		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
1189 		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
1190 
1191 		fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
1192 		ring = &adev->vcn.inst[i].ring_enc[1];
1193 		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1194 		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1195 		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1196 		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1197 		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
1198 		fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
1199 	}
1200 
1201 	return 0;
1202 }
1203 
vcn_v2_5_mmsch_start(struct amdgpu_device * adev,struct amdgpu_mm_table * table)1204 static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
1205 				struct amdgpu_mm_table *table)
1206 {
1207 	uint32_t data = 0, loop = 0, size = 0;
1208 	uint64_t addr = table->gpu_addr;
1209 	struct mmsch_v1_1_init_header *header = NULL;
1210 
1211 	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
1212 	size = header->total_size;
1213 
1214 	/*
1215 	 * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
1216 	 *  memory descriptor location
1217 	 */
1218 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
1219 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
1220 
1221 	/* 2, update vmid of descriptor */
1222 	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
1223 	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
1224 	/* use domain0 for MM scheduler */
1225 	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
1226 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);
1227 
1228 	/* 3, notify mmsch about the size of this descriptor */
1229 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
1230 
1231 	/* 4, set resp to zero */
1232 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
1233 
1234 	/*
1235 	 * 5, kick off the initialization and wait until
1236 	 * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
1237 	 */
1238 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
1239 
1240 	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
1241 	loop = 10;
1242 	while ((data & 0x10000002) != 0x10000002) {
1243 		udelay(100);
1244 		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
1245 		loop--;
1246 		if (!loop)
1247 			break;
1248 	}
1249 
1250 	if (!loop) {
1251 		dev_err(adev->dev,
1252 			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
1253 			data);
1254 		return -EBUSY;
1255 	}
1256 
1257 	return 0;
1258 }
1259 
vcn_v2_5_sriov_start(struct amdgpu_device * adev)1260 static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
1261 {
1262 	struct amdgpu_ring *ring;
1263 	uint32_t offset, size, tmp, i, rb_bufsz;
1264 	uint32_t table_size = 0;
1265 	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
1266 	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
1267 	struct mmsch_v1_0_cmd_end end = { { 0 } };
1268 	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
1269 	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
1270 
1271 	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
1272 	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
1273 	end.cmd_header.command_type = MMSCH_COMMAND__END;
1274 
1275 	header->version = MMSCH_VERSION;
1276 	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
1277 	init_table += header->total_size;
1278 
1279 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1280 		header->eng[i].table_offset = header->total_size;
1281 		header->eng[i].init_status = 0;
1282 		header->eng[i].table_size = 0;
1283 
1284 		table_size = 0;
1285 
1286 		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
1287 			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
1288 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
1289 
1290 		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
1291 		/* mc resume*/
1292 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1293 			MMSCH_V1_0_INSERT_DIRECT_WT(
1294 				SOC15_REG_OFFSET(VCN, i,
1295 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1296 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
1297 			MMSCH_V1_0_INSERT_DIRECT_WT(
1298 				SOC15_REG_OFFSET(VCN, i,
1299 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1300 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
1301 			offset = 0;
1302 			MMSCH_V1_0_INSERT_DIRECT_WT(
1303 				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
1304 		} else {
1305 			MMSCH_V1_0_INSERT_DIRECT_WT(
1306 				SOC15_REG_OFFSET(VCN, i,
1307 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1308 				lower_32_bits(adev->vcn.inst[i].gpu_addr));
1309 			MMSCH_V1_0_INSERT_DIRECT_WT(
1310 				SOC15_REG_OFFSET(VCN, i,
1311 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1312 				upper_32_bits(adev->vcn.inst[i].gpu_addr));
1313 			offset = size;
1314 			MMSCH_V1_0_INSERT_DIRECT_WT(
1315 				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
1316 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
1317 		}
1318 
1319 		MMSCH_V1_0_INSERT_DIRECT_WT(
1320 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
1321 			size);
1322 		MMSCH_V1_0_INSERT_DIRECT_WT(
1323 			SOC15_REG_OFFSET(VCN, i,
1324 				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
1325 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
1326 		MMSCH_V1_0_INSERT_DIRECT_WT(
1327 			SOC15_REG_OFFSET(VCN, i,
1328 				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
1329 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
1330 		MMSCH_V1_0_INSERT_DIRECT_WT(
1331 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
1332 			0);
1333 		MMSCH_V1_0_INSERT_DIRECT_WT(
1334 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
1335 			AMDGPU_VCN_STACK_SIZE);
1336 		MMSCH_V1_0_INSERT_DIRECT_WT(
1337 			SOC15_REG_OFFSET(VCN, i,
1338 				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
1339 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
1340 				AMDGPU_VCN_STACK_SIZE));
1341 		MMSCH_V1_0_INSERT_DIRECT_WT(
1342 			SOC15_REG_OFFSET(VCN, i,
1343 				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
1344 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
1345 				AMDGPU_VCN_STACK_SIZE));
1346 		MMSCH_V1_0_INSERT_DIRECT_WT(
1347 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
1348 			0);
1349 		MMSCH_V1_0_INSERT_DIRECT_WT(
1350 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
1351 			AMDGPU_VCN_CONTEXT_SIZE);
1352 
1353 		ring = &adev->vcn.inst[i].ring_enc[0];
1354 		ring->wptr = 0;
1355 
1356 		MMSCH_V1_0_INSERT_DIRECT_WT(
1357 			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
1358 			lower_32_bits(ring->gpu_addr));
1359 		MMSCH_V1_0_INSERT_DIRECT_WT(
1360 			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
1361 			upper_32_bits(ring->gpu_addr));
1362 		MMSCH_V1_0_INSERT_DIRECT_WT(
1363 			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
1364 			ring->ring_size / 4);
1365 
1366 		ring = &adev->vcn.inst[i].ring_dec;
1367 		ring->wptr = 0;
1368 		MMSCH_V1_0_INSERT_DIRECT_WT(
1369 			SOC15_REG_OFFSET(VCN, i,
1370 				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
1371 			lower_32_bits(ring->gpu_addr));
1372 		MMSCH_V1_0_INSERT_DIRECT_WT(
1373 			SOC15_REG_OFFSET(VCN, i,
1374 				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
1375 			upper_32_bits(ring->gpu_addr));
1376 
1377 		/* force RBC into idle state */
1378 		rb_bufsz = order_base_2(ring->ring_size);
1379 		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1380 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1381 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1382 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1383 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1384 		MMSCH_V1_0_INSERT_DIRECT_WT(
1385 			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);
1386 
1387 		/* add end packet */
1388 		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
1389 		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
1390 		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;
1391 
1392 		/* refine header */
1393 		header->eng[i].table_size = table_size;
1394 		header->total_size += table_size;
1395 	}
1396 
1397 	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
1398 }
1399 
vcn_v2_5_stop_dpg_mode(struct amdgpu_device * adev,int inst_idx)1400 static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
1401 {
1402 	uint32_t tmp;
1403 
1404 	/* Wait for power status to be 1 */
1405 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
1406 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1407 
1408 	/* wait for read ptr to be equal to write ptr */
1409 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
1410 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1411 
1412 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
1413 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
1414 
1415 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
1416 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
1417 
1418 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
1419 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1420 
1421 	/* disable dynamic power gating mode */
1422 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
1423 			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1424 
1425 	return 0;
1426 }
1427 
vcn_v2_5_stop(struct amdgpu_device * adev)1428 static int vcn_v2_5_stop(struct amdgpu_device *adev)
1429 {
1430 	uint32_t tmp;
1431 	int i, r = 0;
1432 
1433 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1434 		if (adev->vcn.harvest_config & (1 << i))
1435 			continue;
1436 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1437 			r = vcn_v2_5_stop_dpg_mode(adev, i);
1438 			continue;
1439 		}
1440 
1441 		/* wait for vcn idle */
1442 		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
1443 		if (r)
1444 			return r;
1445 
1446 		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1447 			UVD_LMI_STATUS__READ_CLEAN_MASK |
1448 			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1449 			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1450 		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
1451 		if (r)
1452 			return r;
1453 
1454 		/* block LMI UMC channel */
1455 		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
1456 		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
1457 		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
1458 
1459 		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
1460 			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1461 		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
1462 		if (r)
1463 			return r;
1464 
1465 		/* block VCPU register access */
1466 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
1467 			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
1468 			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1469 
1470 		/* reset VCPU */
1471 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
1472 			UVD_VCPU_CNTL__BLK_RST_MASK,
1473 			~UVD_VCPU_CNTL__BLK_RST_MASK);
1474 
1475 		/* disable VCPU clock */
1476 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
1477 			~(UVD_VCPU_CNTL__CLK_EN_MASK));
1478 
1479 		/* clear status */
1480 		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
1481 
1482 		vcn_v2_5_enable_clock_gating(adev);
1483 
1484 		/* enable register anti-hang mechanism */
1485 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
1486 			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
1487 			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1488 	}
1489 
1490 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1491 		if (adev->pm.dpm_enabled)
1492 			amdgpu_dpm_enable_vcn(adev, false, i);
1493 	}
1494 
1495 	return 0;
1496 }
1497 
vcn_v2_5_pause_dpg_mode(struct amdgpu_device * adev,int inst_idx,struct dpg_pause_state * new_state)1498 static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
1499 				int inst_idx, struct dpg_pause_state *new_state)
1500 {
1501 	struct amdgpu_ring *ring;
1502 	uint32_t reg_data = 0;
1503 	int ret_code = 0;
1504 
1505 	/* pause/unpause if state is changed */
1506 	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1507 		DRM_DEBUG("dpg pause state changed %d -> %d",
1508 			adev->vcn.inst[inst_idx].pause_state.fw_based,	new_state->fw_based);
1509 		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
1510 			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1511 
1512 		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1513 			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
1514 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1515 
1516 			if (!ret_code) {
1517 				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
1518 
1519 				/* pause DPG */
1520 				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1521 				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1522 
1523 				/* wait for ACK */
1524 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
1525 					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1526 					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1527 
1528 				/* Stall DPG before WPTR/RPTR reset */
1529 				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1530 					   UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
1531 					   ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1532 
1533 				/* Restore */
1534 				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
1535 				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
1536 				ring->wptr = 0;
1537 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
1538 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1539 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
1540 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1541 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1542 				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
1543 
1544 				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
1545 				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
1546 				ring->wptr = 0;
1547 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1548 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1549 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
1550 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1551 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1552 				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
1553 
1554 				/* Unstall DPG */
1555 				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1556 					   0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1557 
1558 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
1559 					   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1560 			}
1561 		} else {
1562 			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1563 			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1564 			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
1565 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1566 		}
1567 		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1568 	}
1569 
1570 	return 0;
1571 }
1572 
1573 /**
1574  * vcn_v2_5_dec_ring_get_rptr - get read pointer
1575  *
1576  * @ring: amdgpu_ring pointer
1577  *
1578  * Returns the current hardware read pointer
1579  */
vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring * ring)1580 static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
1581 {
1582 	struct amdgpu_device *adev = ring->adev;
1583 
1584 	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
1585 }
1586 
1587 /**
1588  * vcn_v2_5_dec_ring_get_wptr - get write pointer
1589  *
1590  * @ring: amdgpu_ring pointer
1591  *
1592  * Returns the current hardware write pointer
1593  */
vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring * ring)1594 static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
1595 {
1596 	struct amdgpu_device *adev = ring->adev;
1597 
1598 	if (ring->use_doorbell)
1599 		return *ring->wptr_cpu_addr;
1600 	else
1601 		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
1602 }
1603 
1604 /**
1605  * vcn_v2_5_dec_ring_set_wptr - set write pointer
1606  *
1607  * @ring: amdgpu_ring pointer
1608  *
1609  * Commits the write pointer to the hardware
1610  */
vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring * ring)1611 static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
1612 {
1613 	struct amdgpu_device *adev = ring->adev;
1614 
1615 	if (ring->use_doorbell) {
1616 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1617 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1618 	} else {
1619 		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1620 	}
1621 }
1622 
1623 static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
1624 	.type = AMDGPU_RING_TYPE_VCN_DEC,
1625 	.align_mask = 0xf,
1626 	.secure_submission_supported = true,
1627 	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
1628 	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
1629 	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
1630 	.emit_frame_size =
1631 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1632 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1633 		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
1634 		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
1635 		6,
1636 	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1637 	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
1638 	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
1639 	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1640 	.test_ring = vcn_v2_0_dec_ring_test_ring,
1641 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
1642 	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
1643 	.insert_start = vcn_v2_0_dec_ring_insert_start,
1644 	.insert_end = vcn_v2_0_dec_ring_insert_end,
1645 	.pad_ib = amdgpu_ring_generic_pad_ib,
1646 	.begin_use = amdgpu_vcn_ring_begin_use,
1647 	.end_use = amdgpu_vcn_ring_end_use,
1648 	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1649 	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1650 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1651 };
1652 
1653 /**
1654  * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
1655  *
1656  * @ring: amdgpu_ring pointer
1657  *
1658  * Returns the current hardware enc read pointer
1659  */
vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring * ring)1660 static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
1661 {
1662 	struct amdgpu_device *adev = ring->adev;
1663 
1664 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
1665 		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
1666 	else
1667 		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
1668 }
1669 
1670 /**
1671  * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
1672  *
1673  * @ring: amdgpu_ring pointer
1674  *
1675  * Returns the current hardware enc write pointer
1676  */
vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring * ring)1677 static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
1678 {
1679 	struct amdgpu_device *adev = ring->adev;
1680 
1681 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1682 		if (ring->use_doorbell)
1683 			return *ring->wptr_cpu_addr;
1684 		else
1685 			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
1686 	} else {
1687 		if (ring->use_doorbell)
1688 			return *ring->wptr_cpu_addr;
1689 		else
1690 			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
1691 	}
1692 }
1693 
1694 /**
1695  * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
1696  *
1697  * @ring: amdgpu_ring pointer
1698  *
1699  * Commits the enc write pointer to the hardware
1700  */
vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring * ring)1701 static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
1702 {
1703 	struct amdgpu_device *adev = ring->adev;
1704 
1705 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1706 		if (ring->use_doorbell) {
1707 			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1708 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1709 		} else {
1710 			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1711 		}
1712 	} else {
1713 		if (ring->use_doorbell) {
1714 			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1715 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1716 		} else {
1717 			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1718 		}
1719 	}
1720 }
1721 
1722 static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
1723 	.type = AMDGPU_RING_TYPE_VCN_ENC,
1724 	.align_mask = 0x3f,
1725 	.nop = VCN_ENC_CMD_NO_OP,
1726 	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
1727 	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
1728 	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
1729 	.emit_frame_size =
1730 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1731 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1732 		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1733 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1734 		1, /* vcn_v2_0_enc_ring_insert_end */
1735 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1736 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
1737 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
1738 	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
1739 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
1740 	.test_ib = amdgpu_vcn_enc_ring_test_ib,
1741 	.insert_nop = amdgpu_ring_insert_nop,
1742 	.insert_end = vcn_v2_0_enc_ring_insert_end,
1743 	.pad_ib = amdgpu_ring_generic_pad_ib,
1744 	.begin_use = amdgpu_vcn_ring_begin_use,
1745 	.end_use = amdgpu_vcn_ring_end_use,
1746 	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
1747 	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
1748 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1749 };
1750 
vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device * adev)1751 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
1752 {
1753 	int i;
1754 
1755 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1756 		if (adev->vcn.harvest_config & (1 << i))
1757 			continue;
1758 		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
1759 		adev->vcn.inst[i].ring_dec.me = i;
1760 	}
1761 }
1762 
vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device * adev)1763 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
1764 {
1765 	int i, j;
1766 
1767 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
1768 		if (adev->vcn.harvest_config & (1 << j))
1769 			continue;
1770 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
1771 			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
1772 			adev->vcn.inst[j].ring_enc[i].me = j;
1773 		}
1774 	}
1775 }
1776 
vcn_v2_5_is_idle(void * handle)1777 static bool vcn_v2_5_is_idle(void *handle)
1778 {
1779 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1780 	int i, ret = 1;
1781 
1782 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1783 		if (adev->vcn.harvest_config & (1 << i))
1784 			continue;
1785 
1786 		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
1787 	}
1788 
1789 	return ret;
1790 }
1791 
vcn_v2_5_wait_for_idle(struct amdgpu_ip_block * ip_block)1792 static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
1793 {
1794 	struct amdgpu_device *adev = ip_block->adev;
1795 	int i, ret = 0;
1796 
1797 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1798 		if (adev->vcn.harvest_config & (1 << i))
1799 			continue;
1800 		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
1801 			UVD_STATUS__IDLE);
1802 		if (ret)
1803 			return ret;
1804 	}
1805 
1806 	return ret;
1807 }
1808 
vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block * ip_block,enum amd_clockgating_state state)1809 static int vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1810 					  enum amd_clockgating_state state)
1811 {
1812 	struct amdgpu_device *adev = ip_block->adev;
1813 	bool enable = (state == AMD_CG_STATE_GATE);
1814 
1815 	if (amdgpu_sriov_vf(adev))
1816 		return 0;
1817 
1818 	if (enable) {
1819 		if (!vcn_v2_5_is_idle(adev))
1820 			return -EBUSY;
1821 		vcn_v2_5_enable_clock_gating(adev);
1822 	} else {
1823 		vcn_v2_5_disable_clock_gating(adev);
1824 	}
1825 
1826 	return 0;
1827 }
1828 
vcn_v2_5_set_powergating_state(struct amdgpu_ip_block * ip_block,enum amd_powergating_state state)1829 static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
1830 					  enum amd_powergating_state state)
1831 {
1832 	struct amdgpu_device *adev = ip_block->adev;
1833 	int ret;
1834 
1835 	if (amdgpu_sriov_vf(adev))
1836 		return 0;
1837 
1838 	if(state == adev->vcn.cur_state)
1839 		return 0;
1840 
1841 	if (state == AMD_PG_STATE_GATE)
1842 		ret = vcn_v2_5_stop(adev);
1843 	else
1844 		ret = vcn_v2_5_start(adev);
1845 
1846 	if(!ret)
1847 		adev->vcn.cur_state = state;
1848 
1849 	return ret;
1850 }
1851 
vcn_v2_5_set_interrupt_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned type,enum amdgpu_interrupt_state state)1852 static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
1853 					struct amdgpu_irq_src *source,
1854 					unsigned type,
1855 					enum amdgpu_interrupt_state state)
1856 {
1857 	return 0;
1858 }
1859 
vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned int type,enum amdgpu_interrupt_state state)1860 static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
1861 					struct amdgpu_irq_src *source,
1862 					unsigned int type,
1863 					enum amdgpu_interrupt_state state)
1864 {
1865 	return 0;
1866 }
1867 
vcn_v2_5_process_interrupt(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)1868 static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
1869 				      struct amdgpu_irq_src *source,
1870 				      struct amdgpu_iv_entry *entry)
1871 {
1872 	uint32_t ip_instance;
1873 
1874 	switch (entry->client_id) {
1875 	case SOC15_IH_CLIENTID_VCN:
1876 		ip_instance = 0;
1877 		break;
1878 	case SOC15_IH_CLIENTID_VCN1:
1879 		ip_instance = 1;
1880 		break;
1881 	default:
1882 		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1883 		return 0;
1884 	}
1885 
1886 	DRM_DEBUG("IH: VCN TRAP\n");
1887 
1888 	switch (entry->src_id) {
1889 	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
1890 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
1891 		break;
1892 	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1893 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
1894 		break;
1895 	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
1896 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
1897 		break;
1898 	default:
1899 		DRM_ERROR("Unhandled interrupt: %d %d\n",
1900 			  entry->src_id, entry->src_data[0]);
1901 		break;
1902 	}
1903 
1904 	return 0;
1905 }
1906 
1907 static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
1908 	.set = vcn_v2_5_set_interrupt_state,
1909 	.process = vcn_v2_5_process_interrupt,
1910 };
1911 
1912 static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
1913 	.set = vcn_v2_6_set_ras_interrupt_state,
1914 	.process = amdgpu_vcn_process_poison_irq,
1915 };
1916 
vcn_v2_5_set_irq_funcs(struct amdgpu_device * adev)1917 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
1918 {
1919 	int i;
1920 
1921 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1922 		if (adev->vcn.harvest_config & (1 << i))
1923 			continue;
1924 		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
1925 		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
1926 
1927 		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
1928 		adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
1929 	}
1930 }
1931 
vcn_v2_5_print_ip_state(struct amdgpu_ip_block * ip_block,struct drm_printer * p)1932 static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
1933 {
1934 	struct amdgpu_device *adev = ip_block->adev;
1935 	int i, j;
1936 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
1937 	uint32_t inst_off, is_powered;
1938 
1939 	if (!adev->vcn.ip_dump)
1940 		return;
1941 
1942 	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
1943 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1944 		if (adev->vcn.harvest_config & (1 << i)) {
1945 			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
1946 			continue;
1947 		}
1948 
1949 		inst_off = i * reg_count;
1950 		is_powered = (adev->vcn.ip_dump[inst_off] &
1951 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
1952 
1953 		if (is_powered) {
1954 			drm_printf(p, "\nActive Instance:VCN%d\n", i);
1955 			for (j = 0; j < reg_count; j++)
1956 				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
1957 					   adev->vcn.ip_dump[inst_off + j]);
1958 		} else {
1959 			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
1960 		}
1961 	}
1962 }
1963 
vcn_v2_5_dump_ip_state(struct amdgpu_ip_block * ip_block)1964 static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
1965 {
1966 	struct amdgpu_device *adev = ip_block->adev;
1967 	int i, j;
1968 	bool is_powered;
1969 	uint32_t inst_off;
1970 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
1971 
1972 	if (!adev->vcn.ip_dump)
1973 		return;
1974 
1975 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1976 		if (adev->vcn.harvest_config & (1 << i))
1977 			continue;
1978 
1979 		inst_off = i * reg_count;
1980 		/* mmUVD_POWER_STATUS is always readable and is first element of the array */
1981 		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
1982 		is_powered = (adev->vcn.ip_dump[inst_off] &
1983 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
1984 
1985 		if (is_powered)
1986 			for (j = 1; j < reg_count; j++)
1987 				adev->vcn.ip_dump[inst_off + j] =
1988 					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i));
1989 	}
1990 }
1991 
1992 static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
1993 	.name = "vcn_v2_5",
1994 	.early_init = vcn_v2_5_early_init,
1995 	.sw_init = vcn_v2_5_sw_init,
1996 	.sw_fini = vcn_v2_5_sw_fini,
1997 	.hw_init = vcn_v2_5_hw_init,
1998 	.hw_fini = vcn_v2_5_hw_fini,
1999 	.suspend = vcn_v2_5_suspend,
2000 	.resume = vcn_v2_5_resume,
2001 	.is_idle = vcn_v2_5_is_idle,
2002 	.wait_for_idle = vcn_v2_5_wait_for_idle,
2003 	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
2004 	.set_powergating_state = vcn_v2_5_set_powergating_state,
2005 	.dump_ip_state = vcn_v2_5_dump_ip_state,
2006 	.print_ip_state = vcn_v2_5_print_ip_state,
2007 };
2008 
2009 static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
2010         .name = "vcn_v2_6",
2011         .early_init = vcn_v2_5_early_init,
2012         .sw_init = vcn_v2_5_sw_init,
2013         .sw_fini = vcn_v2_5_sw_fini,
2014         .hw_init = vcn_v2_5_hw_init,
2015         .hw_fini = vcn_v2_5_hw_fini,
2016         .suspend = vcn_v2_5_suspend,
2017         .resume = vcn_v2_5_resume,
2018         .is_idle = vcn_v2_5_is_idle,
2019         .wait_for_idle = vcn_v2_5_wait_for_idle,
2020         .set_clockgating_state = vcn_v2_5_set_clockgating_state,
2021         .set_powergating_state = vcn_v2_5_set_powergating_state,
2022 	.dump_ip_state = vcn_v2_5_dump_ip_state,
2023 	.print_ip_state = vcn_v2_5_print_ip_state,
2024 };
2025 
2026 const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
2027 {
2028 		.type = AMD_IP_BLOCK_TYPE_VCN,
2029 		.major = 2,
2030 		.minor = 5,
2031 		.rev = 0,
2032 		.funcs = &vcn_v2_5_ip_funcs,
2033 };
2034 
2035 const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
2036 {
2037 		.type = AMD_IP_BLOCK_TYPE_VCN,
2038 		.major = 2,
2039 		.minor = 6,
2040 		.rev = 0,
2041 		.funcs = &vcn_v2_6_ip_funcs,
2042 };
2043 
vcn_v2_6_query_poison_by_instance(struct amdgpu_device * adev,uint32_t instance,uint32_t sub_block)2044 static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
2045 			uint32_t instance, uint32_t sub_block)
2046 {
2047 	uint32_t poison_stat = 0, reg_value = 0;
2048 
2049 	switch (sub_block) {
2050 	case AMDGPU_VCN_V2_6_VCPU_VCODEC:
2051 		reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
2052 		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
2053 		break;
2054 	default:
2055 		break;
2056 	}
2057 
2058 	if (poison_stat)
2059 		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
2060 			instance, sub_block);
2061 
2062 	return poison_stat;
2063 }
2064 
vcn_v2_6_query_poison_status(struct amdgpu_device * adev)2065 static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
2066 {
2067 	uint32_t inst, sub;
2068 	uint32_t poison_stat = 0;
2069 
2070 	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
2071 		for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
2072 			poison_stat +=
2073 			vcn_v2_6_query_poison_by_instance(adev, inst, sub);
2074 
2075 	return !!poison_stat;
2076 }
2077 
2078 const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
2079 	.query_poison_status = vcn_v2_6_query_poison_status,
2080 };
2081 
2082 static struct amdgpu_vcn_ras vcn_v2_6_ras = {
2083 	.ras_block = {
2084 		.hw_ops = &vcn_v2_6_ras_hw_ops,
2085 		.ras_late_init = amdgpu_vcn_ras_late_init,
2086 	},
2087 };
2088 
vcn_v2_5_set_ras_funcs(struct amdgpu_device * adev)2089 static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
2090 {
2091 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2092 	case IP_VERSION(2, 6, 0):
2093 		adev->vcn.ras = &vcn_v2_6_ras;
2094 		break;
2095 	default:
2096 		break;
2097 	}
2098 }
2099