xref: /linux/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c (revision bc50cf64e9c7cd048a4b14d111b6a7f94783d6f8)
1 /*
2  * Copyright 2021 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include "amdgpu.h"
26 #include "amdgpu_vcn.h"
27 #include "amdgpu_pm.h"
28 #include "amdgpu_cs.h"
29 #include "soc15.h"
30 #include "soc15d.h"
31 #include "soc15_hw_ip.h"
32 #include "vcn_v2_0.h"
33 #include "mmsch_v4_0.h"
34 #include "vcn_v4_0.h"
35 
36 #include "vcn/vcn_4_0_0_offset.h"
37 #include "vcn/vcn_4_0_0_sh_mask.h"
38 #include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
39 
40 #include <drm/drm_drv.h>
41 
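/*
 * The shared DPG register-access helpers still use the mm-prefixed
 * register names; alias them to this IP version's reg-prefixed offsets.
 */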
42 #define mmUVD_DPG_LMA_CTL							regUVD_DPG_LMA_CTL
43 #define mmUVD_DPG_LMA_CTL_BASE_IDX						regUVD_DPG_LMA_CTL_BASE_IDX
44 #define mmUVD_DPG_LMA_DATA							regUVD_DPG_LMA_DATA
45 #define mmUVD_DPG_LMA_DATA_BASE_IDX						regUVD_DPG_LMA_DATA_BASE_IDX
46 
47 #define VCN_VID_SOC_ADDRESS_2_0							0x1fb00
48 #define VCN1_VID_SOC_ADDRESS_3_0						0x48300
49 
50 #define VCN_HARVEST_MMSCH								0
51 
52 #define RDECODE_MSG_CREATE							0x00000000
53 #define RDECODE_MESSAGE_CREATE							0x00000001
54 
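/* IH client ID for each VCN instance */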
55 static int amdgpu_ih_clientid_vcns[] = {
56 	SOC15_IH_CLIENTID_VCN,
57 	SOC15_IH_CLIENTID_VCN1
58 };
59 
60 static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
61 static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
62 static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
63 static int vcn_v4_0_set_powergating_state(void *handle,
64         enum amd_powergating_state state);
65 static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
66         int inst_idx, struct dpg_pause_state *new_state);
67 static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
68 static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
69 
70 /**
71  * vcn_v4_0_early_init - set function pointers
72  *
73  * @handle: amdgpu_device pointer
74  *
75  * Set ring and irq function pointers
76  */
77 static int vcn_v4_0_early_init(void *handle)
78 {
79 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
80 
81 	if (amdgpu_sriov_vf(adev))
82 		adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
83 
84 	/* re-use enc ring as unified ring */
85 	adev->vcn.num_enc_rings = 1;
86 
87 	vcn_v4_0_set_unified_ring_funcs(adev);
88 	vcn_v4_0_set_irq_funcs(adev);
89 	vcn_v4_0_set_ras_funcs(adev);
90 
91 	return 0;
92 }
93 
94 /**
95  * vcn_v4_0_sw_init - sw init for VCN block
96  *
97  * @handle: amdgpu_device pointer
98  *
99  * Load firmware and perform software initialization
100  */
101 static int vcn_v4_0_sw_init(void *handle)
102 {
103 	struct amdgpu_ring *ring;
104 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
105 	int i, r;
106 	int vcn_doorbell_index = 0;
107 
108 	r = amdgpu_vcn_sw_init(adev);
109 	if (r)
110 		return r;
111 
112 	amdgpu_vcn_setup_ucode(adev);
113 
114 	r = amdgpu_vcn_resume(adev);
115 	if (r)
116 		return r;
117 
118 	if (amdgpu_sriov_vf(adev)) {
119 		vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 - MMSCH_DOORBELL_OFFSET;
120 		/* get DWORD offset */
121 		vcn_doorbell_index = vcn_doorbell_index << 1;
122 	}
123 
124 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
125 		volatile struct amdgpu_vcn4_fw_shared *fw_shared;
126 
127 		if (adev->vcn.harvest_config & (1 << i))
128 			continue;
129 
130 		atomic_set(&adev->vcn.inst[i].sched_score, 0);
131 
132 		/* VCN UNIFIED TRAP */
133 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
134 				VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
135 		if (r)
136 			return r;
137 
138 		/* VCN POISON TRAP */
139 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
140 				VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
141 		if (r)
142 			return r;
143 
144 		ring = &adev->vcn.inst[i].ring_enc[0];
145 		ring->use_doorbell = true;
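		/*
		 * vcn_ring0_1 is a 64-bit doorbell index, so shift it left
		 * once to get the 32-bit doorbell slot; under SR-IOV the slot
		 * comes from the MMSCH-managed window computed above instead.
		 */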
146 		if (amdgpu_sriov_vf(adev))
147 			ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1;
148 		else
149 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
150 
151 		sprintf(ring->name, "vcn_unified_%d", i);
152 
153 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
154 						AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
155 		if (r)
156 			return r;
157 
158 		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
159 		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
160 		fw_shared->sq.is_enabled = 1;
161 
162 		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
163 		fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
164 			AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
165 
166 		if (amdgpu_sriov_vf(adev))
167 			fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
168 
169 		if (amdgpu_vcnfw_log)
170 			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
171 	}
172 
173 	if (amdgpu_sriov_vf(adev)) {
174 		r = amdgpu_virt_alloc_mm_table(adev);
175 		if (r)
176 			return r;
177 	}
178 
179 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
180 		adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
181 
182 	return 0;
183 }
184 
185 /**
186  * vcn_v4_0_sw_fini - sw fini for VCN block
187  *
188  * @handle: amdgpu_device pointer
189  *
190  * Suspend VCN and free up software allocations
191  */
192 static int vcn_v4_0_sw_fini(void *handle)
193 {
194 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
195 	int i, r, idx;
196 
197 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
198 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
199 			volatile struct amdgpu_vcn4_fw_shared *fw_shared;
200 
201 			if (adev->vcn.harvest_config & (1 << i))
202 				continue;
203 
204 			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
205 			fw_shared->present_flag_0 = 0;
206 			fw_shared->sq.is_enabled = 0;
207 		}
208 
209 		drm_dev_exit(idx);
210 	}
211 
212 	if (amdgpu_sriov_vf(adev))
213 		amdgpu_virt_free_mm_table(adev);
214 
215 	r = amdgpu_vcn_suspend(adev);
216 	if (r)
217 		return r;
218 
219 	r = amdgpu_vcn_sw_fini(adev);
220 
221 	return r;
222 }
223 
224 /**
225  * vcn_v4_0_hw_init - start and test VCN block
226  *
227  * @handle: amdgpu_device pointer
228  *
229  * Initialize the hardware, boot up the VCPU and do some testing
230  */
231 static int vcn_v4_0_hw_init(void *handle)
232 {
233 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
234 	struct amdgpu_ring *ring;
235 	int i, r = 0;
236 
237 	if (amdgpu_sriov_vf(adev)) {
238 		r = vcn_v4_0_start_sriov(adev);
239 		if (r)
240 			goto done;
241 
242 		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
243 			if (adev->vcn.harvest_config & (1 << i))
244 				continue;
245 
246 			ring = &adev->vcn.inst[i].ring_enc[0];
247 			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
248 				ring->sched.ready = false;
249 				ring->no_scheduler = true;
250 				dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
251 			} else {
252 				ring->wptr = 0;
253 				ring->wptr_old = 0;
254 				vcn_v4_0_unified_ring_set_wptr(ring);
255 				ring->sched.ready = true;
256 			}
257 		}
258 	} else {
259 		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
260 			if (adev->vcn.harvest_config & (1 << i))
261 				continue;
262 
263 			ring = &adev->vcn.inst[i].ring_enc[0];
264 
265 			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
266 					((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
267 
268 			r = amdgpu_ring_test_helper(ring);
269 			if (r)
270 				goto done;
271 
272 		}
273 	}
274 
275 done:
276 	if (!r)
277 		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
278 			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");
279 
280 	return r;
281 }
282 
283 /**
284  * vcn_v4_0_hw_fini - stop the hardware block
285  *
286  * @handle: amdgpu_device pointer
287  *
288  * Stop the VCN block, mark ring as not ready any more
289  */
290 static int vcn_v4_0_hw_fini(void *handle)
291 {
292 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
293 	int i;
294 
295 	cancel_delayed_work_sync(&adev->vcn.idle_work);
296 
297 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
298 		if (adev->vcn.harvest_config & (1 << i))
299 			continue;
300 		if (!amdgpu_sriov_vf(adev)) {
301 			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
302 					(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
303 					 RREG32_SOC15(VCN, i, regUVD_STATUS))) {
304 				vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
305 			}
306 		}
307 
308 		amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
309 	}
310 
311 	return 0;
312 }
313 
314 /**
315  * vcn_v4_0_suspend - suspend VCN block
316  *
317  * @handle: amdgpu_device pointer
318  *
319  * HW fini and suspend VCN block
320  */
321 static int vcn_v4_0_suspend(void *handle)
322 {
323 	int r;
324 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
325 
326 	r = vcn_v4_0_hw_fini(adev);
327 	if (r)
328 		return r;
329 
330 	r = amdgpu_vcn_suspend(adev);
331 
332 	return r;
333 }
334 
335 /**
336  * vcn_v4_0_resume - resume VCN block
337  *
338  * @handle: amdgpu_device pointer
339  *
340  * Resume firmware and hw init VCN block
341  */
342 static int vcn_v4_0_resume(void *handle)
343 {
344 	int r;
345 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
346 
347 	r = amdgpu_vcn_resume(adev);
348 	if (r)
349 		return r;
350 
351 	r = vcn_v4_0_hw_init(adev);
352 
353 	return r;
354 }
355 
356 /**
357  * vcn_v4_0_mc_resume - memory controller programming
358  *
359  * @adev: amdgpu_device pointer
360  * @inst: instance number
361  *
362  * Let the VCN memory controller know its offsets
363  */
364 static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
365 {
366 	uint32_t offset, size;
367 	const struct common_firmware_header *hdr;
368 
369 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
370 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
371 
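	/*
	 * Aperture layout programmed below: cache window 0 holds the
	 * firmware image (or points into the PSP TMR when the ucode is
	 * PSP-loaded), window 1 the stack, window 2 the context, and the
	 * non-cached window the amdgpu_vcn4_fw_shared area.
	 */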
372 	/* cache window 0: fw */
373 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
374 		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
375 			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
376 		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
377 			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
378 		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
379 		offset = 0;
380 	} else {
381 		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
382 			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
383 		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
384 			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
385 		offset = size;
386 		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
387 	}
388 	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);
389 
390 	/* cache window 1: stack */
391 	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
392 		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
393 	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
394 		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
395 	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
396 	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
397 
398 	/* cache window 2: context */
399 	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
400 		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
401 	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
402 		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
403 	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
404 	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
405 
406 	/* non-cache window */
407 	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
408 		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
409 	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
410 		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
411 	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
412 	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
413 		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
414 }
415 
416 /**
417  * vcn_v4_0_mc_resume_dpg_mode - memory controller programming for dpg mode
418  *
419  * @adev: amdgpu_device pointer
420  * @inst_idx: instance number index
421  * @indirect: indirectly write sram
422  *
423  * Let the VCN memory controller know its offsets with dpg mode
424  */
425 static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
426 {
427 	uint32_t offset, size;
428 	const struct common_firmware_header *hdr;

429 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
430 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
431 
432 	/* cache window 0: fw */
433 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
434 		if (!indirect) {
435 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
436 				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
437 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
438 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
439 				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
440 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
441 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
442 				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
443 		} else {
444 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
445 				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
446 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
447 				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
448 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
449 				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
450 		}
451 		offset = 0;
452 	} else {
453 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
454 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
455 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
456 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
457 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
458 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
459 		offset = size;
460 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
461 			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
462 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
463 	}
464 
465 	if (!indirect)
466 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
467 			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
468 	else
469 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
470 			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
471 
472 	/* cache window 1: stack */
473 	if (!indirect) {
474 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
475 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
476 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
477 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
478 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
479 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
480 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
481 			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
482 	} else {
483 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
484 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
485 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
486 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
487 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
488 			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
489 	}
490 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
491 			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
492 
493 	/* cache window 2: context */
494 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
495 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
496 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
497 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
498 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
499 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
500 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
501 			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
502 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
503 			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
504 
505 	/* non-cache window */
506 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
507 			VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
508 			lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
509 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
510 			VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
511 			upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
512 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
513 			VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
514 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
515 			VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
516 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
517 
518 	/* VCN global tiling registers */
519 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
520 		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
521 }
522 
523 /**
524  * vcn_v4_0_disable_static_power_gating - disable VCN static power gating
525  *
526  * @adev: amdgpu_device pointer
527  * @inst: instance number
528  *
529  * Disable static power gating for VCN block
530  */
531 static void vcn_v4_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
532 {
533 	uint32_t data = 0;
534 
535 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
536 		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
537 			| 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
538 			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
539 			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
540 			| 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
541 			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
542 			| 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
543 			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
544 			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
545 			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
546 			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
547 			| 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
548 			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
549 			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
550 
551 		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
552 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS,
553 			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
554 	} else {
555 		uint32_t value;
556 
557 		value = (inst) ? 0x2200800 : 0;
558 		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
559 			| 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
560 			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
561 			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
562 			| 1 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
563 			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
564 			| 1 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
565 			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
566 			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
567 			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
568 			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
569 			| 1 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
570 			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
571 			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
572 
573 		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
574 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS, value, 0x3F3FFFFF);
575 	}
576 
577 	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
578 	data &= ~0x103;
579 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
580 		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
581 			UVD_POWER_STATUS__UVD_PG_EN_MASK;
582 
583 	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
586 }
587 
588 /**
589  * vcn_v4_0_enable_static_power_gating - enable VCN static power gating
590  *
591  * @adev: amdgpu_device pointer
592  * @inst: instance number
593  *
594  * Enable static power gating for VCN block
595  */
596 static void vcn_v4_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
597 {
598 	uint32_t data;
599 
600 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
601 		/* Before power off, this indicator has to be turned on */
602 		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
603 		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
604 		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
605 		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
606 
607 		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
608 			| 2 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
609 			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
610 			| 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
611 			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
612 			| 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
613 			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
614 			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
615 			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
616 			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
617 			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
618 			| 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
619 			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
620 			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
621 		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
622 
623 		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
624 			| 2 << UVD_PGFSM_STATUS__UVDS_PWR_STATUS__SHIFT
625 			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
626 			| 2 << UVD_PGFSM_STATUS__UVDTC_PWR_STATUS__SHIFT
627 			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
628 			| 2 << UVD_PGFSM_STATUS__UVDTA_PWR_STATUS__SHIFT
629 			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
630 			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
631 			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
632 			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
633 			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
634 			| 2 << UVD_PGFSM_STATUS__UVDTB_PWR_STATUS__SHIFT
635 			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
636 			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
637 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
638 	}
641 }
642 
643 /**
644  * vcn_v4_0_disable_clock_gating - disable VCN clock gating
645  *
646  * @adev: amdgpu_device pointer
647  * @inst: instance number
648  *
649  * Disable clock gating for VCN block
650  */
651 static void vcn_v4_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
652 {
653 	uint32_t data;
654 
655 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
656 		return;
657 
658 	/* VCN disable CGC */
659 	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
660 	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
661 	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
662 	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
663 	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);
664 
665 	data = RREG32_SOC15(VCN, inst, regUVD_CGC_GATE);
666 	data &= ~(UVD_CGC_GATE__SYS_MASK
667 		| UVD_CGC_GATE__UDEC_MASK
668 		| UVD_CGC_GATE__MPEG2_MASK
669 		| UVD_CGC_GATE__REGS_MASK
670 		| UVD_CGC_GATE__RBC_MASK
671 		| UVD_CGC_GATE__LMI_MC_MASK
672 		| UVD_CGC_GATE__LMI_UMC_MASK
673 		| UVD_CGC_GATE__IDCT_MASK
674 		| UVD_CGC_GATE__MPRD_MASK
675 		| UVD_CGC_GATE__MPC_MASK
676 		| UVD_CGC_GATE__LBSI_MASK
677 		| UVD_CGC_GATE__LRBBM_MASK
678 		| UVD_CGC_GATE__UDEC_RE_MASK
679 		| UVD_CGC_GATE__UDEC_CM_MASK
680 		| UVD_CGC_GATE__UDEC_IT_MASK
681 		| UVD_CGC_GATE__UDEC_DB_MASK
682 		| UVD_CGC_GATE__UDEC_MP_MASK
683 		| UVD_CGC_GATE__WCB_MASK
684 		| UVD_CGC_GATE__VCPU_MASK
685 		| UVD_CGC_GATE__MMSCH_MASK);
686 
687 	WREG32_SOC15(VCN, inst, regUVD_CGC_GATE, data);
688 	SOC15_WAIT_ON_RREG(VCN, inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);
689 
690 	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
691 	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
692 		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
693 		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
694 		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
695 		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
696 		| UVD_CGC_CTRL__SYS_MODE_MASK
697 		| UVD_CGC_CTRL__UDEC_MODE_MASK
698 		| UVD_CGC_CTRL__MPEG2_MODE_MASK
699 		| UVD_CGC_CTRL__REGS_MODE_MASK
700 		| UVD_CGC_CTRL__RBC_MODE_MASK
701 		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
702 		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
703 		| UVD_CGC_CTRL__IDCT_MODE_MASK
704 		| UVD_CGC_CTRL__MPRD_MODE_MASK
705 		| UVD_CGC_CTRL__MPC_MODE_MASK
706 		| UVD_CGC_CTRL__LBSI_MODE_MASK
707 		| UVD_CGC_CTRL__LRBBM_MODE_MASK
708 		| UVD_CGC_CTRL__WCB_MODE_MASK
709 		| UVD_CGC_CTRL__VCPU_MODE_MASK
710 		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
711 	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);
712 
713 	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE);
714 	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
715 		| UVD_SUVD_CGC_GATE__SIT_MASK
716 		| UVD_SUVD_CGC_GATE__SMP_MASK
717 		| UVD_SUVD_CGC_GATE__SCM_MASK
718 		| UVD_SUVD_CGC_GATE__SDB_MASK
719 		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
720 		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
721 		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
722 		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
723 		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
724 		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
725 		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
726 		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
727 		| UVD_SUVD_CGC_GATE__SCLR_MASK
728 		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
729 		| UVD_SUVD_CGC_GATE__ENT_MASK
730 		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
731 		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
732 		| UVD_SUVD_CGC_GATE__SITE_MASK
733 		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
734 		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
735 		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
736 		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
737 		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
738 	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE, data);
739 
740 	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
741 	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
742 		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
743 		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
744 		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
745 		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
746 		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
747 		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
748 		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
749 		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
750 		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
751 	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
752 }
753 
754 /**
755  * vcn_v4_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
756  *
757  * @adev: amdgpu_device pointer
758  * @sram_sel: sram select
759  * @inst_idx: instance number index
760  * @indirect: indirectly write sram
761  *
762  * Disable clock gating for VCN block with dpg mode
763  */
764 static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
765       int inst_idx, uint8_t indirect)
766 {
767 	uint32_t reg_data = 0;
768 
769 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
770 		return;
771 
772 	/* enable sw clock gating control */
773 	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
774 	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
775 	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
776 	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
777 		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
778 		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
779 		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
780 		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
781 		 UVD_CGC_CTRL__SYS_MODE_MASK |
782 		 UVD_CGC_CTRL__UDEC_MODE_MASK |
783 		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
784 		 UVD_CGC_CTRL__REGS_MODE_MASK |
785 		 UVD_CGC_CTRL__RBC_MODE_MASK |
786 		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
787 		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
788 		 UVD_CGC_CTRL__IDCT_MODE_MASK |
789 		 UVD_CGC_CTRL__MPRD_MODE_MASK |
790 		 UVD_CGC_CTRL__MPC_MODE_MASK |
791 		 UVD_CGC_CTRL__LBSI_MODE_MASK |
792 		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
793 		 UVD_CGC_CTRL__WCB_MODE_MASK |
794 		 UVD_CGC_CTRL__VCPU_MODE_MASK);
795 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
796 		VCN, inst_idx, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);
797 
798 	/* turn off clock gating */
799 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
800 		VCN, inst_idx, regUVD_CGC_GATE), 0, sram_sel, indirect);
801 
802 	/* turn on SUVD clock gating */
803 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
804 		VCN, inst_idx, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
805 
806 	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
807 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
808 		VCN, inst_idx, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
809 }
810 
811 /**
812  * vcn_v4_0_enable_clock_gating - enable VCN clock gating
813  *
814  * @adev: amdgpu_device pointer
815  * @inst: instance number
816  *
817  * Enable clock gating for VCN block
818  */
819 static void vcn_v4_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
820 {
821 	uint32_t data;
822 
823 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
824 		return;
825 
826 	/* enable VCN CGC */
827 	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
828 	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
829 	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
830 	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
831 	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);
832 
833 	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
834 	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
835 		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
836 		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
837 		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
838 		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
839 		| UVD_CGC_CTRL__SYS_MODE_MASK
840 		| UVD_CGC_CTRL__UDEC_MODE_MASK
841 		| UVD_CGC_CTRL__MPEG2_MODE_MASK
842 		| UVD_CGC_CTRL__REGS_MODE_MASK
843 		| UVD_CGC_CTRL__RBC_MODE_MASK
844 		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
845 		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
846 		| UVD_CGC_CTRL__IDCT_MODE_MASK
847 		| UVD_CGC_CTRL__MPRD_MODE_MASK
848 		| UVD_CGC_CTRL__MPC_MODE_MASK
849 		| UVD_CGC_CTRL__LBSI_MODE_MASK
850 		| UVD_CGC_CTRL__LRBBM_MODE_MASK
851 		| UVD_CGC_CTRL__WCB_MODE_MASK
852 		| UVD_CGC_CTRL__VCPU_MODE_MASK
853 		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
854 	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);
855 
856 	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
857 	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
858 		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
859 		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
860 		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
861 		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
862 		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
863 		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
864 		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
865 		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
866 		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
867 	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
870 }
871 
872 /**
873  * vcn_v4_0_start_dpg_mode - VCN start with dpg mode
874  *
875  * @adev: amdgpu_device pointer
876  * @inst_idx: instance number index
877  * @indirect: indirectly write sram
878  *
879  * Start VCN block with dpg mode
880  */
881 static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
882 {
883 	volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
884 	struct amdgpu_ring *ring;
885 	uint32_t tmp;
886 
887 	/* disable register anti-hang mechanism */
888 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
889 		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
890 	/* enable dynamic power gating mode */
891 	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
892 	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
893 	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
894 	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);
895 
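	/*
	 * With indirect programming, the register writes below are staged in
	 * the DPG scratch SRAM and committed in one batch through
	 * psp_update_vcn_sram() at the end of this sequence.
	 */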
896 	if (indirect)
897 		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
898 
899 	/* disable clock gating */
900 	vcn_v4_0_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
901 
902 	/* enable VCPU clock */
903 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
904 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
905 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
906 		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);
907 
908 	/* disable master interrupt */
909 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
910 		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);
911 
912 	/* setup regUVD_LMI_CTRL */
913 	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
914 		UVD_LMI_CTRL__REQ_MODE_MASK |
915 		UVD_LMI_CTRL__CRC_RESET_MASK |
916 		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
917 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
918 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
919 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
920 		0x00100000L);
921 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
922 		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);
923 
924 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
925 		VCN, inst_idx, regUVD_MPC_CNTL),
926 		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
927 
928 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
929 		VCN, inst_idx, regUVD_MPC_SET_MUXA0),
930 		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
931 		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
932 		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
933 		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
934 
935 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
936 		VCN, inst_idx, regUVD_MPC_SET_MUXB0),
937 		 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
938 		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
939 		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
940 		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
941 
942 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
943 		VCN, inst_idx, regUVD_MPC_SET_MUX),
944 		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
945 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
946 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
947 
948 	vcn_v4_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
949 
950 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
951 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
952 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
953 		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);
954 
955 	/* enable LMI MC and UMC channels */
956 	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
957 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
958 		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);
959 
960 	/* enable master interrupt */
961 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
962 		VCN, inst_idx, regUVD_MASTINT_EN),
963 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
964 
966 	if (indirect)
967 		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
968 			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
969 				(uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
970 
971 	ring = &adev->vcn.inst[inst_idx].ring_enc[0];
972 
973 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
974 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
975 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);
976 
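	/*
	 * Ring reset handshake: disable RB1 and raise FW_QUEUE_RING_RESET so
	 * the firmware ignores the stale ring pointers while they are
	 * zeroed, then re-enable the ring and clear the flag.
	 */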
977 	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
978 	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
979 	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
980 	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
981 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
982 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);
983 
984 	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
985 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
986 	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
987 
988 	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
989 	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
990 	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
991 	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
992 
993 	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
994 			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
995 			VCN_RB1_DB_CTRL__EN_MASK);
996 
997 	return 0;
998 }
999 
1001 /**
1002  * vcn_v4_0_start - VCN start
1003  *
1004  * @adev: amdgpu_device pointer
1005  *
1006  * Start VCN block
1007  */
1008 static int vcn_v4_0_start(struct amdgpu_device *adev)
1009 {
1010 	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
1011 	struct amdgpu_ring *ring;
1012 	uint32_t tmp;
1013 	int i, j, k, r;
1014 
1015 	if (adev->pm.dpm_enabled)
1016 		amdgpu_dpm_enable_uvd(adev, true);
1017 
1018 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1019 		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1020 
1021 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1022 			r = vcn_v4_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
1023 			continue;
1024 		}
1025 
1026 		/* disable VCN power gating */
1027 		vcn_v4_0_disable_static_power_gating(adev, i);
1028 
1029 		/* set VCN status busy */
1030 		tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
1031 		WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
1032 
1033 		/* disable SW clock gating */
1034 		vcn_v4_0_disable_clock_gating(adev, i);
1035 
1036 		/* enable VCPU clock */
1037 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
1038 				UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
1039 
1040 		/* disable master interrupt */
1041 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
1042 				~UVD_MASTINT_EN__VCPU_EN_MASK);
1043 
1044 		/* enable LMI MC and UMC channels */
1045 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
1046 				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1047 
1048 		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
1049 		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
1050 		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
1051 		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
1052 
1053 		/* setup regUVD_LMI_CTRL */
1054 		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
1055 		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
1056 				UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1057 				UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1058 				UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1059 				UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
1060 
1061 		/* setup regUVD_MPC_CNTL */
1062 		tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
1063 		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
1064 		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
1065 		WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
1066 
1067 		/* setup UVD_MPC_SET_MUXA0 */
1068 		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
1069 				((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
1070 				 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
1071 				 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
1072 				 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
1073 
1074 		/* setup UVD_MPC_SET_MUXB0 */
1075 		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
1076 				((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
1077 				 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
1078 				 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
1079 				 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
1080 
1081 		/* setup UVD_MPC_SET_MUX */
1082 		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
1083 				((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
1084 				 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
1085 				 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
1086 
1087 		vcn_v4_0_mc_resume(adev, i);
1088 
1089 		/* VCN global tiling registers */
1090 		WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
1091 				adev->gfx.config.gb_addr_config);
1092 
1093 		/* unblock VCPU register access */
1094 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
1095 				~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1096 
1097 		/* release VCPU reset to boot */
1098 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
1099 				~UVD_VCPU_CNTL__BLK_RST_MASK);
1100 
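		/*
		 * Boot handshake: poll UVD_STATUS for the VCPU report bit; if
		 * the firmware does not come up, pulse BLK_RST on the VCPU
		 * and retry (up to 10 rounds of 100 polls).
		 */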
1101 		for (j = 0; j < 10; ++j) {
1102 			uint32_t status;
1103 
1104 			for (k = 0; k < 100; ++k) {
1105 				status = RREG32_SOC15(VCN, i, regUVD_STATUS);
1106 				if (status & 2)
1107 					break;
1108 				mdelay(10);
1109 				if (amdgpu_emu_mode == 1)
1110 					msleep(1);
1111 			}
1112 
1113 			if (amdgpu_emu_mode == 1) {
1114 				r = -1;
1115 				if (status & 2) {
1116 					r = 0;
1117 					break;
1118 				}
1119 			} else {
1120 				r = 0;
1121 				if (status & 2)
1122 					break;
1123 
1124 				dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
1125 				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
1126 							UVD_VCPU_CNTL__BLK_RST_MASK,
1127 							~UVD_VCPU_CNTL__BLK_RST_MASK);
1128 				mdelay(10);
1129 				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
1130 						~UVD_VCPU_CNTL__BLK_RST_MASK);
1131 
1132 				mdelay(10);
1133 				r = -1;
1134 			}
1135 		}
1136 
1137 		if (r) {
1138 			dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
1139 			return r;
1140 		}
1141 
1142 		/* enable master interrupt */
1143 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
1144 				UVD_MASTINT_EN__VCPU_EN_MASK,
1145 				~UVD_MASTINT_EN__VCPU_EN_MASK);
1146 
1147 		/* clear the busy bit of VCN_STATUS */
1148 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
1149 				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1150 
1151 		ring = &adev->vcn.inst[i].ring_enc[0];
1152 		WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
1153 				ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
1154 				VCN_RB1_DB_CTRL__EN_MASK);
1155 
1156 		WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
1157 		WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1158 		WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
1159 
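		/* same ring reset handshake as in the DPG start path above */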
1160 		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
1161 		tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
1162 		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
1163 		fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
1164 		WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
1165 		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
1166 
1167 		tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
1168 		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
1169 		ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
1170 
1171 		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
1172 		tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
1173 		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
1174 		fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
1175 	}
1176 
1177 	return 0;
1178 }
1179 
1180 static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
1181 {
1182 	int i;
1183 	struct amdgpu_ring *ring_enc;
1184 	uint64_t cache_addr;
1185 	uint64_t rb_enc_addr;
1186 	uint64_t ctx_addr;
1187 	uint32_t param, resp, expected;
1188 	uint32_t offset, cache_size;
1189 	uint32_t tmp, timeout;
1190 
1191 	struct amdgpu_mm_table *table = &adev->virt.mm_table;
1192 	uint32_t *table_loc;
1193 	uint32_t table_size;
1194 	uint32_t size, size_dw;
1195 	uint32_t init_status;
1196 	uint32_t enabled_vcn;
1197 
1198 	struct mmsch_v4_0_cmd_direct_write
1199 		direct_wt = { {0} };
1200 	struct mmsch_v4_0_cmd_direct_read_modify_write
1201 		direct_rd_mod_wt = { {0} };
1202 	struct mmsch_v4_0_cmd_end end = { {0} };
1203 	struct mmsch_v4_0_init_header header;
1204 
1205 	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
1206 	volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
1207 
1208 	direct_wt.cmd_header.command_type =
1209 		MMSCH_COMMAND__DIRECT_REG_WRITE;
1210 	direct_rd_mod_wt.cmd_header.command_type =
1211 		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
1212 	end.cmd_header.command_type =
1213 		MMSCH_COMMAND__END;
1214 
1215 	header.version = MMSCH_VERSION;
1216 	header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2;
1217 	for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
1218 		header.inst[i].init_status = 0;
1219 		header.inst[i].table_offset = 0;
1220 		header.inst[i].table_size = 0;
1221 	}
1222 
1223 	table_loc = (uint32_t *)table->cpu_addr;
1224 	table_loc += header.total_size;
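	/* per-instance command tables are packed right after the header */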
1225 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1226 		if (adev->vcn.harvest_config & (1 << i))
1227 			continue;
1228 
1229 		table_size = 0;
1230 
1231 		MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
1232 			regUVD_STATUS),
1233 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
1234 
1235 		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
1236 
1237 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1238 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1239 				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1240 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
1241 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1242 				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1243 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
1244 			offset = 0;
1245 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1246 				regUVD_VCPU_CACHE_OFFSET0),
1247 				0);
1248 		} else {
1249 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1250 				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1251 				lower_32_bits(adev->vcn.inst[i].gpu_addr));
1252 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1253 				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1254 				upper_32_bits(adev->vcn.inst[i].gpu_addr));
1255 			offset = cache_size;
1256 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1257 				regUVD_VCPU_CACHE_OFFSET0),
1258 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
1259 		}
1260 
1261 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1262 			regUVD_VCPU_CACHE_SIZE0),
1263 			cache_size);
1264 
1265 		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
1266 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1267 			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
1268 			lower_32_bits(cache_addr));
1269 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1270 			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
1271 			upper_32_bits(cache_addr));
1272 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1273 			regUVD_VCPU_CACHE_OFFSET1),
1274 			0);
1275 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1276 			regUVD_VCPU_CACHE_SIZE1),
1277 			AMDGPU_VCN_STACK_SIZE);
1278 
1279 		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
1280 			AMDGPU_VCN_STACK_SIZE;
1281 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1282 			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
1283 			lower_32_bits(cache_addr));
1284 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1285 			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
1286 			upper_32_bits(cache_addr));
1287 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1288 			regUVD_VCPU_CACHE_OFFSET2),
1289 			0);
1290 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1291 			regUVD_VCPU_CACHE_SIZE2),
1292 			AMDGPU_VCN_CONTEXT_SIZE);
1293 
1294 		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1295 		rb_setup = &fw_shared->rb_setup;
1296 
1297 		ring_enc = &adev->vcn.inst[i].ring_enc[0];
1298 		ring_enc->wptr = 0;
1299 		rb_enc_addr = ring_enc->gpu_addr;
1300 
1301 		rb_setup->is_rb_enabled_flags |= RB_ENABLED;
1302 		rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
1303 		rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
1304 		rb_setup->rb_size = ring_enc->ring_size / 4;
1305 		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
1306 
1307 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1308 			regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
1309 			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
1310 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1311 			regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
1312 			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
1313 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1314 			regUVD_VCPU_NONCACHE_SIZE0),
1315 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
1316 
1317 		/* add end packet */
1318 		MMSCH_V4_0_INSERT_END();
1319 
1320 		/* refine header */
1321 		header.inst[i].init_status = 0;
1322 		header.inst[i].table_offset = header.total_size;
1323 		header.inst[i].table_size = table_size;
1324 		header.total_size += table_size;
1325 	}
1326 
1327 	/* Update init table header in memory */
1328 	size = sizeof(struct mmsch_v4_0_init_header);
1329 	table_loc = (uint32_t *)table->cpu_addr;
1330 	memcpy((void *)table_loc, &header, size);
1331 
1332 	/* message MMSCH (in VCN[0]) to initialize this client
1333 	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
1334 	 * of memory descriptor location
1335 	 */
1336 	ctx_addr = table->gpu_addr;
1337 	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
1338 	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
1339 
1340 	/* 2, update vmid of descriptor */
1341 	tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID);
1342 	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
1343 	/* use domain0 for MM scheduler */
1344 	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
1345 	WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp);
1346 
1347 	/* 3, notify mmsch about the size of this descriptor */
1348 	size = header.total_size;
1349 	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size);
1350 
1351 	/* 4, set resp to zero */
1352 	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0);
1353 
1354 	/* 5, kick off the initialization and wait until
1355 	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
1356 	 */
1357 	param = 0x00000001;
1358 	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param);
1359 	tmp = 0;
1360 	timeout = 1000;
1361 	resp = 0;
1362 	expected = MMSCH_VF_MAILBOX_RESP__OK;
1363 	while (resp != expected) {
1364 		resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);
1365 		if (resp != 0)
1366 			break;
1367 
1368 		udelay(10);
1369 		tmp = tmp + 10;
1370 		if (tmp >= timeout) {
1371 			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
1372 				" waiting for regMMSCH_VF_MAILBOX_RESP "\
1373 				"(expected=0x%08x, readback=0x%08x)\n",
1374 				tmp, expected, resp);
1375 			return -EBUSY;
1376 		}
1377 	}
1378 	enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
1379 	init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->inst[enabled_vcn].init_status;
1380 	if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
1381 	&& init_status != MMSCH_VF_ENGINE_STATUS__PASS)
1382 		DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
1383 			"status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
1384 
1385 	return 0;
1386 }
1387 
1388 /**
1389  * vcn_v4_0_stop_dpg_mode - VCN stop with dpg mode
1390  *
1391  * @adev: amdgpu_device pointer
1392  * @inst_idx: instance number index
1393  *
1394  * Stop VCN block with dpg mode
1395  */
1396 static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
1397 {
1398 	uint32_t tmp;
1399 
1400 	/* Wait for power status to be 1 */
1401 	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
1402 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1403 
1404 	/* wait for read ptr to be equal to write ptr */
1405 	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
1406 	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1407 
1408 	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
1409 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1410 
1411 	/* disable dynamic power gating mode */
1412 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
1413 		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1414 }
1415 
1416 /**
1417  * vcn_v4_0_stop - VCN stop
1418  *
1419  * @adev: amdgpu_device pointer
1420  *
1421  * Stop VCN block
1422  */
1423 static int vcn_v4_0_stop(struct amdgpu_device *adev)
1424 {
1425 	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
1426 	uint32_t tmp;
1427 	int i, r = 0;
1428 
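	/*
	 * Per-instance teardown: hold off the firmware queue, wait for the
	 * VCPU and LMI to go idle, stall the UMC channel, reset the VCPU,
	 * then re-enable clock and static power gating.
	 */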
1429 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1430 		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1431 		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
1432 
1433 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1434 			vcn_v4_0_stop_dpg_mode(adev, i);
1435 			continue;
1436 		}
1437 
1438 		/* wait for vcn idle */
1439 		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
1440 		if (r)
1441 			return r;
1442 
1443 		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1444 			UVD_LMI_STATUS__READ_CLEAN_MASK |
1445 			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1446 			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1447 		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
1448 		if (r)
1449 			return r;
1450 
1451 		/* disable LMI UMC channel */
1452 		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
1453 		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
1454 		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
1455 		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
1456 			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1457 		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
1458 		if (r)
1459 			return r;
1460 
1461 		/* block VCPU register access */
1462 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
1463 				UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
1464 				~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1465 
1466 		/* reset VCPU */
1467 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
1468 				UVD_VCPU_CNTL__BLK_RST_MASK,
1469 				~UVD_VCPU_CNTL__BLK_RST_MASK);
1470 
1471 		/* disable VCPU clock */
1472 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
1473 				~(UVD_VCPU_CNTL__CLK_EN_MASK));
1474 
1475 		/* apply soft reset */
1476 		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
1477 		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
1478 		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
1479 		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
1480 		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
1481 		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
1482 
1483 		/* clear status */
1484 		WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
1485 
1486 		/* apply HW clock gating */
1487 		vcn_v4_0_enable_clock_gating(adev, i);
1488 
1489 		/* enable VCN power gating */
1490 		vcn_v4_0_enable_static_power_gating(adev, i);
1491 	}
1492 
1493 	if (adev->pm.dpm_enabled)
1494 		amdgpu_dpm_enable_uvd(adev, false);
1495 
1496 	return 0;
1497 }
1498 
1499 /**
1500  * vcn_v4_0_pause_dpg_mode - VCN pause with dpg mode
1501  *
1502  * @adev: amdgpu_device pointer
1503  * @inst_idx: instance number index
1504  * @new_state: pause state
1505  *
1506  * Pause dpg mode for VCN block
1507  */
1508 static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
1509       struct dpg_pause_state *new_state)
1510 {
1511 	uint32_t reg_data = 0;
1512 	int ret_code;
1513 
1514 	/* pause/unpause if state is changed */
1515 	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1516 		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
1517 			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
1518 		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
1519 			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1520 
1521 		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1522 			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
1523 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1524 
1525 			if (!ret_code) {
1526 				/* pause DPG */
1527 				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1528 				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
1529 
1530 				/* wait for ACK */
1531 				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
1532 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1533 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1534 
1535 				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS,
1536 					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1537 			}
1538 		} else {
1539 			/* unpause dpg, no need to wait */
1540 			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1541 			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
1542 		}
1543 		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1544 	}
1545 
1546 	return 0;
1547 }
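
/*
 * This helper is not called directly; when DPG is supported, the common
 * VCN code stores it in adev->vcn.pause_dpg_mode at sw_init time and
 * invokes it whenever the pause state should change.  A sketch of a
 * caller pausing DPG on instance 0 (illustrative only):
 *
 *	struct dpg_pause_state new_state = {
 *		.fw_based = VCN_DPG_STATE__PAUSE,
 *	};
 *
 *	if (adev->vcn.pause_dpg_mode)
 *		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
 */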
1548 
1549 /**
1550  * vcn_v4_0_unified_ring_get_rptr - get unified read pointer
1551  *
1552  * @ring: amdgpu_ring pointer
1553  *
1554  * Returns the current hardware unified read pointer
1555  */
1556 static uint64_t vcn_v4_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
1557 {
1558 	struct amdgpu_device *adev = ring->adev;
1559 
1560 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1561 		DRM_ERROR("wrong ring id passed to %s\n", __func__);
1562 
1563 	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
1564 }
1565 
1566 /**
1567  * vcn_v4_0_unified_ring_get_wptr - get unified write pointer
1568  *
1569  * @ring: amdgpu_ring pointer
1570  *
1571  * Returns the current hardware unified write pointer
1572  */
1573 static uint64_t vcn_v4_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
1574 {
1575 	struct amdgpu_device *adev = ring->adev;
1576 
1577 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1578 		DRM_ERROR("wrong ring id passed to %s\n", __func__);
1579 
1580 	if (ring->use_doorbell)
1581 		return *ring->wptr_cpu_addr;
1582 	else
1583 		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
1584 }
1585 
1586 /**
1587  * vcn_v4_0_unified_ring_set_wptr - set enc write pointer
1588  *
1589  * @ring: amdgpu_ring pointer
1590  *
1591  * Commits the enc write pointer to the hardware
1592  */
1593 static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
1594 {
1595 	struct amdgpu_device *adev = ring->adev;
1596 
1597 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1598 		DRM_ERROR("wrong ring id passed to %s\n", __func__);
1599 
1600 	if (ring->use_doorbell) {
1601 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1602 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1603 	} else {
1604 		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
1605 	}
1606 }
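
/*
 * The rptr/wptr accessors above are reached through the common ring
 * helpers rather than called directly.  A typical submission, sketched
 * under that assumption, ends with amdgpu_ring_commit(), which lands in
 * vcn_v4_0_unified_ring_set_wptr() and rings the doorbell:
 *
 *	if (!amdgpu_ring_alloc(ring, 16)) {
 *		amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 *		amdgpu_ring_commit(ring);
 *	}
 */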
1607 
1608 static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
1609 				struct amdgpu_job *job)
1610 {
1611 	struct drm_gpu_scheduler **scheds;
1612 
1613 	/* The create msg must be in the first IB submitted */
1614 	if (atomic_read(&job->base.entity->fence_seq))
1615 		return -EINVAL;
1616 
1617 	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
1618 		[AMDGPU_RING_PRIO_0].sched;
1619 	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
1620 	return 0;
1621 }
1622 
1623 static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
1624 			    uint64_t addr)
1625 {
1626 	struct ttm_operation_ctx ctx = { false, false };
1627 	struct amdgpu_bo_va_mapping *map;
1628 	uint32_t *msg, num_buffers;
1629 	struct amdgpu_bo *bo;
1630 	uint64_t start, end;
1631 	unsigned int i;
1632 	void *ptr;
1633 	int r;
1634 
1635 	addr &= AMDGPU_GMC_HOLE_MASK;
1636 	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
1637 	if (r) {
1638 		DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
1639 		return r;
1640 	}
1641 
1642 	start = map->start * AMDGPU_GPU_PAGE_SIZE;
1643 	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
1644 	if (addr & 0x7) {
1645 		DRM_ERROR("VCN messages must be 8-byte aligned!\n");
1646 		return -EINVAL;
1647 	}
1648 
1649 	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
1650 	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1651 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1652 	if (r) {
1653 		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
1654 		return r;
1655 	}
1656 
1657 	r = amdgpu_bo_kmap(bo, &ptr);
1658 	if (r) {
1659 		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
1660 		return r;
1661 	}
1662 
1663 	msg = ptr + addr - start;
1664 
1665 	/* Check length */
1666 	if (msg[1] > end - addr) {
1667 		r = -EINVAL;
1668 		goto out;
1669 	}
1670 
1671 	if (msg[3] != RDECODE_MSG_CREATE)
1672 		goto out;
1673 
1674 	num_buffers = msg[2];
1675 	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
1676 		uint32_t offset, size, *create;
1677 
1678 		if (msg[0] != RDECODE_MESSAGE_CREATE)
1679 			continue;
1680 
1681 		offset = msg[1];
1682 		size = msg[2];
1683 
1684 		if (offset + size > end) {
1685 			r = -EINVAL;
1686 			goto out;
1687 		}
1688 
1689 		create = ptr + addr + offset - start;
1690 
1691 		/* H264, HEVC and VP9 can run on any instance */
1692 		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
1693 			continue;
1694 
1695 		r = vcn_v4_0_limit_sched(p, job);
1696 		if (r)
1697 			goto out;
1698 	}
1699 
1700 out:
1701 	amdgpu_bo_kunmap(bo);
1702 	return r;
1703 }
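
/*
 * Shape of the decode message parsed above, reconstructed from the dword
 * accesses in vcn_v4_0_dec_msg() (indices are 32-bit dwords from the
 * start of the message):
 *
 *	msg[1]	message size in bytes, bounds-checked against the mapping
 *	msg[2]	number of 4-dword buffer descriptors that follow
 *	msg[3]	message type; only RDECODE_MSG_CREATE is inspected further
 *	msg[6]	first descriptor: { type, offset, size, ... }; descriptors
 *		of type RDECODE_MESSAGE_CREATE point at a create structure
 *		whose first dword is the codec type (0x7, 0x10 and 0x11
 *		being the H264/HEVC/VP9 cases that may run on any instance)
 */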
1704 
1705 #define RADEON_VCN_ENGINE_TYPE_DECODE                                 (0x00000003)
1706 
1707 static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1708 					   struct amdgpu_job *job,
1709 					   struct amdgpu_ib *ib)
1710 {
1711 	struct amdgpu_ring *ring = amdgpu_job_ring(job);
1712 	struct amdgpu_vcn_decode_buffer *decode_buffer;
1713 	uint64_t addr;
1714 	uint32_t val;
1715 
1716 	/* The first instance can decode anything */
1717 	if (!ring->me)
1718 		return 0;
1719 
1720 	/* unified queue ib header has 8 double words. */
1721 	if (ib->length_dw < 8)
1722 		return 0;
1723 
1724 	val = amdgpu_ib_get_value(ib, 6); /* RADEON_VCN_ENGINE_TYPE */
1725 	if (val != RADEON_VCN_ENGINE_TYPE_DECODE)
1726 		return 0;
1727 
1728 	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10];
1729 
1730 	if (!(decode_buffer->valid_buf_flag & 0x1))
1731 		return 0;
1732 
1733 	addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
1734 		decode_buffer->msg_buffer_address_lo;
1735 	return vcn_v4_0_dec_msg(p, job, addr);
1736 }
1737 
1738 static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
1739 	.type = AMDGPU_RING_TYPE_VCN_ENC,
1740 	.align_mask = 0x3f,
1741 	.nop = VCN_ENC_CMD_NO_OP,
1742 	.vmhub = AMDGPU_MMHUB_0,
1743 	.get_rptr = vcn_v4_0_unified_ring_get_rptr,
1744 	.get_wptr = vcn_v4_0_unified_ring_get_wptr,
1745 	.set_wptr = vcn_v4_0_unified_ring_set_wptr,
1746 	.patch_cs_in_place = vcn_v4_0_ring_patch_cs_in_place,
1747 	.emit_frame_size =
1748 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1749 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1750 		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1751 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1752 		1, /* vcn_v2_0_enc_ring_insert_end */
1753 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1754 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
1755 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
1756 	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
1757 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
1758 	.test_ib = amdgpu_vcn_unified_ring_test_ib,
1759 	.insert_nop = amdgpu_ring_insert_nop,
1760 	.insert_end = vcn_v2_0_enc_ring_insert_end,
1761 	.pad_ib = amdgpu_ring_generic_pad_ib,
1762 	.begin_use = amdgpu_vcn_ring_begin_use,
1763 	.end_use = amdgpu_vcn_ring_end_use,
1764 	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
1765 	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
1766 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1767 };
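
/*
 * The emit_frame_size above is a worst-case dword budget per submission.
 * Assuming the soc15.h values SOC15_FLUSH_GPU_TLB_NUM_WREG = 6 and
 * SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT = 3, the sum works out to:
 *
 *	6 * 3 + 3 * 4 + 4 + (5 + 5) + 1 = 45 dwords
 */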
1768 
1769 /**
1770  * vcn_v4_0_set_unified_ring_funcs - set unified ring functions
1771  *
1772  * @adev: amdgpu_device pointer
1773  *
1774  * Set unified ring functions
1775  */
1776 static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev)
1777 {
1778 	int i;
1779 
1780 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1781 		if (adev->vcn.harvest_config & (1 << i))
1782 			continue;
1783 
1784 		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_unified_ring_vm_funcs;
1785 		adev->vcn.inst[i].ring_enc[0].me = i;
1786 
1787 		DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i);
1788 	}
1789 }
1790 
1791 /**
1792  * vcn_v4_0_is_idle - check whether the VCN block is idle
1793  *
1794  * @handle: amdgpu_device pointer
1795  *
1796  * Check whether VCN block is idle
1797  */
1798 static bool vcn_v4_0_is_idle(void *handle)
1799 {
1800 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1801 	int i, ret = 1;
1802 
1803 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1804 		if (adev->vcn.harvest_config & (1 << i))
1805 			continue;
1806 
1807 		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
1808 	}
1809 
1810 	return ret;
1811 }
1812 
1813 /**
1814  * vcn_v4_0_wait_for_idle - wait for VCN block idle
1815  *
1816  * @handle: amdgpu_device pointer
1817  *
1818  * Wait for VCN block idle
1819  */
1820 static int vcn_v4_0_wait_for_idle(void *handle)
1821 {
1822 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1823 	int i, ret = 0;
1824 
1825 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1826 		if (adev->vcn.harvest_config & (1 << i))
1827 			continue;
1828 
1829 		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
1830 			UVD_STATUS__IDLE);
1831 		if (ret)
1832 			return ret;
1833 	}
1834 
1835 	return ret;
1836 }
1837 
1838 /**
1839  * vcn_v4_0_set_clockgating_state - set VCN block clockgating state
1840  *
1841  * @handle: amdgpu_device pointer
1842  * @state: clock gating state
1843  *
1844  * Set VCN block clockgating state
1845  */
1846 static int vcn_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
1847 {
1848 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1849 	bool enable = (state == AMD_CG_STATE_GATE);
1850 	int i;
1851 
1852 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1853 		if (adev->vcn.harvest_config & (1 << i))
1854 			continue;
1855 
1856 		if (enable) {
1857 			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
1858 				return -EBUSY;
1859 			vcn_v4_0_enable_clock_gating(adev, i);
1860 		} else {
1861 			vcn_v4_0_disable_clock_gating(adev, i);
1862 		}
1863 	}
1864 
1865 	return 0;
1866 }
1867 
1868 /**
1869  * vcn_v4_0_set_powergating_state - set VCN block powergating state
1870  *
1871  * @handle: amdgpu_device pointer
1872  * @state: power gating state
1873  *
1874  * Set VCN block powergating state
1875  */
1876 static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state)
1877 {
1878 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1879 	int ret;
1880 
1881 	/* For SR-IOV, the guest should not control VCN power gating;
1882 	 * the MMSCH firmware manages both power gating and clock gating,
1883 	 * so the guest must avoid touching the CGC and PG registers.
1884 	 */
1885 	if (amdgpu_sriov_vf(adev)) {
1886 		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
1887 		return 0;
1888 	}
1889 
1890 	if (state == adev->vcn.cur_state)
1891 		return 0;
1892 
1893 	if (state == AMD_PG_STATE_GATE)
1894 		ret = vcn_v4_0_stop(adev);
1895 	else
1896 		ret = vcn_v4_0_start(adev);
1897 
1898 	if (!ret)
1899 		adev->vcn.cur_state = state;
1900 
1901 	return ret;
1902 }
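
/*
 * Power state changes funnel through the IP-block dispatch layer rather
 * than calling this handler directly.  A sketch of gating VCN from
 * elsewhere in the driver (illustrative only):
 *
 *	amdgpu_device_ip_set_powergating_state(adev,
 *					       AMD_IP_BLOCK_TYPE_VCN,
 *					       AMD_PG_STATE_GATE);
 */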
1903 
1904 /**
1905  * vcn_v4_0_set_interrupt_state - set VCN block interrupt state
1906  *
1907  * @adev: amdgpu_device pointer
1908  * @source: interrupt sources
1909  * @type: interrupt types
1910  * @state: interrupt states
1911  *
1912  * Set VCN block interrupt state
1913  */
1914 static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
1915       unsigned type, enum amdgpu_interrupt_state state)
1916 {
1917 	return 0;
1918 }
1919 
1920 /**
1921  * vcn_v4_0_process_interrupt - process VCN block interrupt
1922  *
1923  * @adev: amdgpu_device pointer
1924  * @source: interrupt sources
1925  * @entry: interrupt entry from clients and sources
1926  *
1927  * Process VCN block interrupt
1928  */
1929 static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
1930       struct amdgpu_iv_entry *entry)
1931 {
1932 	uint32_t ip_instance;
1933 
1934 	switch (entry->client_id) {
1935 	case SOC15_IH_CLIENTID_VCN:
1936 		ip_instance = 0;
1937 		break;
1938 	case SOC15_IH_CLIENTID_VCN1:
1939 		ip_instance = 1;
1940 		break;
1941 	default:
1942 		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1943 		return 0;
1944 	}
1945 
1946 	DRM_DEBUG("IH: VCN TRAP\n");
1947 
1948 	switch (entry->src_id) {
1949 	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1950 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
1951 		break;
1952 	case VCN_4_0__SRCID_UVD_POISON:
1953 		amdgpu_vcn_process_poison_irq(adev, source, entry);
1954 		break;
1955 	default:
1956 		DRM_ERROR("Unhandled interrupt: %d %d\n",
1957 			  entry->src_id, entry->src_data[0]);
1958 		break;
1959 	}
1960 
1961 	return 0;
1962 }
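
/*
 * The source ids handled above are registered against this irq source
 * during sw_init; a sketch of that wiring for one instance, mirroring
 * what vcn_v4_0_sw_init() does earlier in this file:
 *
 *	r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
 *			      VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
 *			      &adev->vcn.inst[i].irq);
 */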
1963 
1964 static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
1965 	.set = vcn_v4_0_set_interrupt_state,
1966 	.process = vcn_v4_0_process_interrupt,
1967 };
1968 
1969 /**
1970  * vcn_v4_0_set_irq_funcs - set VCN block irq functions
1971  *
1972  * @adev: amdgpu_device pointer
1973  *
1974  * Set VCN block irq functions
1975  */
1976 static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
1977 {
1978 	int i;
1979 
1980 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1981 		if (adev->vcn.harvest_config & (1 << i))
1982 			continue;
1983 
1984 		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
1985 		adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;
1986 	}
1987 }
1988 
1989 static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
1990 	.name = "vcn_v4_0",
1991 	.early_init = vcn_v4_0_early_init,
1992 	.late_init = NULL,
1993 	.sw_init = vcn_v4_0_sw_init,
1994 	.sw_fini = vcn_v4_0_sw_fini,
1995 	.hw_init = vcn_v4_0_hw_init,
1996 	.hw_fini = vcn_v4_0_hw_fini,
1997 	.suspend = vcn_v4_0_suspend,
1998 	.resume = vcn_v4_0_resume,
1999 	.is_idle = vcn_v4_0_is_idle,
2000 	.wait_for_idle = vcn_v4_0_wait_for_idle,
2001 	.check_soft_reset = NULL,
2002 	.pre_soft_reset = NULL,
2003 	.soft_reset = NULL,
2004 	.post_soft_reset = NULL,
2005 	.set_clockgating_state = vcn_v4_0_set_clockgating_state,
2006 	.set_powergating_state = vcn_v4_0_set_powergating_state,
2007 };
2008 
2009 const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
2011 	.type = AMD_IP_BLOCK_TYPE_VCN,
2012 	.major = 4,
2013 	.minor = 0,
2014 	.rev = 0,
2015 	.funcs = &vcn_v4_0_ip_funcs,
2016 };
2017 
2018 static uint32_t vcn_v4_0_query_poison_by_instance(struct amdgpu_device *adev,
2019 			uint32_t instance, uint32_t sub_block)
2020 {
2021 	uint32_t poison_stat = 0, reg_value = 0;
2022 
2023 	switch (sub_block) {
2024 	case AMDGPU_VCN_V4_0_VCPU_VCODEC:
2025 		reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
2026 		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
2027 		break;
2028 	default:
2029 		break;
2030 	}
2031 
2032 	if (poison_stat)
2033 		dev_info(adev->dev, "Poison detected in VCN%d, sub_block %d\n",
2034 			instance, sub_block);
2035 
2036 	return poison_stat;
2037 }
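
/*
 * REG_GET_FIELD() extracts a named bitfield using the generated
 * <REG>__<FIELD>_MASK and <REG>__<FIELD>__SHIFT constants from the
 * sh_mask headers; in effect (a sketch of the macro's expansion):
 *
 *	#define REG_GET_FIELD(value, reg, field) \
 *		(((value) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
 */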
2038 
2039 static bool vcn_v4_0_query_ras_poison_status(struct amdgpu_device *adev)
2040 {
2041 	uint32_t inst, sub;
2042 	uint32_t poison_stat = 0;
2043 
2044 	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
2045 		for (sub = 0; sub < AMDGPU_VCN_V4_0_MAX_SUB_BLOCK; sub++)
2046 			poison_stat +=
2047 				vcn_v4_0_query_poison_by_instance(adev, inst, sub);
2048 
2049 	return !!poison_stat;
2050 }
2051 
2052 const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
2053 	.query_poison_status = vcn_v4_0_query_ras_poison_status,
2054 };
2055 
2056 static struct amdgpu_vcn_ras vcn_v4_0_ras = {
2057 	.ras_block = {
2058 		.hw_ops = &vcn_v4_0_ras_hw_ops,
2059 	},
2060 };
2061 
2062 static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev)
2063 {
2064 	switch (adev->ip_versions[VCN_HWIP][0]) {
2065 	case IP_VERSION(4, 0, 0):
2066 		adev->vcn.ras = &vcn_v4_0_ras;
2067 		break;
2068 	default:
2069 		break;
2070 	}
2071 
2072 	amdgpu_vcn_set_ras_funcs(adev);
2073 }
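
/*
 * amdgpu_vcn_set_ras_funcs() in amdgpu_vcn.c completes the registration;
 * roughly (a sketch, see the common helper for the authoritative code):
 *
 *	amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
 *	adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
 *	adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
 */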
2074