/*
 * Copyright 2016-2024 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN			"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO		"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2			"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS		"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR			"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE		"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10			"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14			"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12			"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID		"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER		"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH		"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
#define FIRMWARE_ALDEBARAN		"amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY		"amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP		"amdgpu/yellow_carp_vcn.bin"
#define FIRMWARE_VCN_3_1_2		"amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0		"amdgpu/vcn_4_0_0.bin"
#define FIRMWARE_VCN4_0_2		"amdgpu/vcn_4_0_2.bin"
#define FIRMWARE_VCN4_0_3		"amdgpu/vcn_4_0_3.bin"
#define FIRMWARE_VCN4_0_4		"amdgpu/vcn_4_0_4.bin"
#define FIRMWARE_VCN4_0_5		"amdgpu/vcn_4_0_5.bin"
#define FIRMWARE_VCN4_0_6		"amdgpu/vcn_4_0_6.bin"
#define FIRMWARE_VCN4_0_6_1		"amdgpu/vcn_4_0_6_1.bin"
#define FIRMWARE_VCN5_0_0		"amdgpu/vcn_5_0_0.bin"
#define FIRMWARE_VCN5_0_1		"amdgpu/vcn_5_0_1.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);

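/**
 * amdgpu_vcn_early_init - request VCN firmware for an instance
 *
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Request the VCN firmware image for the given instance. When per-instance
 * firmware is used, each instance requests its own image; otherwise
 * instance 0's image is requested once and shared by all instances.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */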
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
	char ucode_prefix[25];
	int r;

	adev->vcn.inst[i].adev = adev;
	adev->vcn.inst[i].inst = i;
	amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));

	if (i != 0 && adev->vcn.per_inst_fw) {
		r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
					 AMDGPU_UCODE_REQUIRED,
					 "amdgpu/%s_%d.bin", ucode_prefix, i);
		if (r)
			amdgpu_ucode_release(&adev->vcn.inst[i].fw);
	} else {
		if (!adev->vcn.inst[0].fw) {
			r = amdgpu_ucode_request(adev, &adev->vcn.inst[0].fw,
						 AMDGPU_UCODE_REQUIRED,
						 "amdgpu/%s.bin", ucode_prefix);
			if (r)
				amdgpu_ucode_release(&adev->vcn.inst[0].fw);
		} else {
			r = 0;
		}
		adev->vcn.inst[i].fw = adev->vcn.inst[0].fw;
	}

	return r;
}

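/**
 * amdgpu_vcn_sw_init - software init of a VCN instance
 *
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Initialize the instance's locks, work items and counters, parse the
 * firmware header, and allocate the VCPU buffer, including the firmware
 * shared memory and, optionally, the firmware log and DPG indirect SRAM
 * buffers.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */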
int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
{
	unsigned long bo_size;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	unsigned int fw_shared_size, log_offset;
	int r;

	mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
	mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
	mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
	atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
	INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
	atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
		adev->vcn.inst[i].indirect_sram = true;

	/*
	 * Some Steam Deck BIOS versions are incompatible with the
	 * indirect SRAM mode, leading to amdgpu being unable to get
	 * properly probed (and even potentially crashing the kernel).
	 * Hence, check for these versions here - notice this is
	 * restricted to Vangogh (Deck's APU).
	 */
	if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {
		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);

		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
				 !strncmp("F7A0114", bios_ver, 7))) {
			adev->vcn.inst[i].indirect_sram = false;
			dev_info(adev->dev,
				 "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
		}
	}

	/* from vcn4 and above, only unified queue is used */
	adev->vcn.inst[i].using_unified_queue =
		amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);

	hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
	adev->vcn.inst[i].fw_version = le32_to_cpu(hdr->ucode_version);
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the new
	 * naming convention. In the old naming convention this field is part
	 * of the version minor and DRM_DISABLED_FLAG. Since the latest version
	 * minor is 0x5B and DRM_DISABLED_FLAG is zero in the old convention,
	 * this field has always been zero so far. These four bits are used to
	 * tell which naming convention is present.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		dev_info(adev->dev,
			 "[VCN instance %d] Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			 i, enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		dev_info(adev->dev, "[VCN instance %d] Found VCN firmware Version: %u.%u Family ID: %u\n",
			 i, version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(5, 0, 0)) {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared));
		log_offset = offsetof(struct amdgpu_vcn5_fw_shared, fw_log);
	} else if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
	} else {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
	}

	bo_size += fw_shared_size;

	if (amdgpu_vcnfw_log)
		bo_size += AMDGPU_VCNFW_LOG_SIZE;

	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vcn.inst[i].vcpu_bo,
				    &adev->vcn.inst[i].gpu_addr,
				    &adev->vcn.inst[i].cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
		bo_size - fw_shared_size;
	adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
		bo_size - fw_shared_size;

	adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;

	if (amdgpu_vcnfw_log) {
		adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
		adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
		adev->vcn.inst[i].fw_shared.log_offset = log_offset;
	}

	if (adev->vcn.inst[i].indirect_sram) {
		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM |
					    AMDGPU_GEM_DOMAIN_GTT,
					    &adev->vcn.inst[i].dpg_sram_bo,
					    &adev->vcn.inst[i].dpg_sram_gpu_addr,
					    &adev->vcn.inst[i].dpg_sram_cpu_addr);
		if (r) {
			dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
			return r;
		}
	}

	return 0;
}

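/**
 * amdgpu_vcn_sw_fini - software teardown of a VCN instance
 *
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Free the buffers allocated in amdgpu_vcn_sw_init(), tear down the rings,
 * release the firmware and destroy the instance locks.
 *
 * Returns: 0 on success.
 */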
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
	int j;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	amdgpu_bo_free_kernel(
		&adev->vcn.inst[i].dpg_sram_bo,
		&adev->vcn.inst[i].dpg_sram_gpu_addr,
		(void **)&adev->vcn.inst[i].dpg_sram_cpu_addr);

	kvfree(adev->vcn.inst[i].saved_bo);

	amdgpu_bo_free_kernel(&adev->vcn.inst[i].vcpu_bo,
			      &adev->vcn.inst[i].gpu_addr,
			      (void **)&adev->vcn.inst[i].cpu_addr);

	amdgpu_ring_fini(&adev->vcn.inst[i].ring_dec);

	for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
		amdgpu_ring_fini(&adev->vcn.inst[i].ring_enc[j]);

	if (adev->vcn.per_inst_fw) {
		amdgpu_ucode_release(&adev->vcn.inst[i].fw);
	} else {
		amdgpu_ucode_release(&adev->vcn.inst[0].fw);
		adev->vcn.inst[i].fw = NULL;
	}

	if (adev->vcn.reg_list)
		amdgpu_vcn_reg_dump_fini(adev);

	mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
	mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);

	return 0;
}

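/**
 * amdgpu_vcn_is_disabled_vcn - check whether a ring type is disabled
 *
 * @adev: amdgpu_device pointer
 * @type: VCN ring type to check
 * @vcn_instance: VCN instance index
 *
 * Check the instance's vcn_config against the disable mask that matches
 * the given ring type.
 *
 * Returns: true if the ring type is disabled for this instance.
 */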
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
	bool ret = false;
	int vcn_config = adev->vcn.inst[vcn_instance].vcn_config;

	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
		ret = true;
	else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
		ret = true;
	else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
		ret = true;

	return ret;
}

static int amdgpu_vcn_save_vcpu_bo_inst(struct amdgpu_device *adev, int i)
{
	unsigned int size;
	void *ptr;
	int idx;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;
	if (adev->vcn.inst[i].vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
	ptr = adev->vcn.inst[i].cpu_addr;

	adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.inst[i].saved_bo)
		return -ENOMEM;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
		drm_dev_exit(idx);
	}

	return 0;
}

int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
{
	int ret, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret = amdgpu_vcn_save_vcpu_bo_inst(adev, i);
		if (ret)
			return ret;
	}

	return 0;
}

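/**
 * amdgpu_vcn_suspend - VCN suspend handling for an instance
 *
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Save the VCPU buffer to system memory, except during a RAS interrupt or
 * a PCIe link reset, where the buffer is restored and cleared on resume
 * instead.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */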
int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
{
	bool in_ras_intr = amdgpu_ras_intr_triggered();

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	/* err_event_athub and DPC recovery will corrupt the VCPU buffer, so
	 * skip saving it here; amdgpu_vcn_resume() will restore the fw data
	 * and clear the buffer instead.
	 */
	if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
		return 0;

	return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
}

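/**
 * amdgpu_vcn_resume - VCN resume handling for an instance
 *
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Restore the VCPU buffer from the copy saved at suspend time or, if no
 * copy exists, reload the firmware image and clear the rest of the buffer.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */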
int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
{
	unsigned int size;
	void *ptr;
	int idx;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;
	if (adev->vcn.inst[i].vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
	ptr = adev->vcn.inst[i].cpu_addr;

	if (adev->vcn.inst[i].saved_bo != NULL) {
		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			drm_dev_exit(idx);
		}
		kvfree(adev->vcn.inst[i].saved_bo);
		adev->vcn.inst[i].saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned int offset;

		hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
				memcpy_toio(adev->vcn.inst[i].cpu_addr,
					    adev->vcn.inst[i].fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				drm_dev_exit(idx);
			}
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}

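/**
 * amdgpu_vcn_get_profile - enable the video power profile
 *
 * @adev: amdgpu_device pointer
 *
 * Switch to the video power profile unless it is already active.
 * Paired with amdgpu_vcn_put_profile().
 */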
void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
{
	int r;

	mutex_lock(&adev->vcn.workload_profile_mutex);

	if (adev->vcn.workload_profile_active) {
		mutex_unlock(&adev->vcn.workload_profile_mutex);
		return;
	}
	r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
					    true);
	if (r)
		dev_warn(adev->dev,
			 "(%d) failed to enable video power profile mode\n", r);
	else
		adev->vcn.workload_profile_active = true;
	mutex_unlock(&adev->vcn.workload_profile_mutex);
}

void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
{
	bool pg = true;
	int r, i;

	mutex_lock(&adev->vcn.workload_profile_mutex);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) {
			pg = false;
			break;
		}
	}

	if (pg) {
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    false);
		if (r)
			dev_warn(adev->dev,
				 "(%d) failed to disable video power profile mode\n", r);
		else
			adev->vcn.workload_profile_active = false;
	}

	mutex_unlock(&adev->vcn.workload_profile_mutex);
}

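/*
 * Idle worker for a VCN instance: if no fences are pending and nothing is
 * being submitted, gate the instance and drop the video power profile;
 * otherwise re-arm the delayed work.
 */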
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_vcn_inst *vcn_inst =
		container_of(work, struct amdgpu_vcn_inst, idle_work.work);
	struct amdgpu_device *adev = vcn_inst->adev;
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i = vcn_inst->inst, j;

	if (adev->vcn.harvest_config & (1 << i))
		return;

	for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
		fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[j]);

	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    !adev->vcn.inst[i].using_unified_queue) {
		struct dpg_pause_state new_state;

		if (fence[i] ||
		    unlikely(atomic_read(&vcn_inst->dpg_enc_submission_cnt)))
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.inst[i].pause_dpg_mode(vcn_inst, &new_state);
	}

	fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
	fences += fence[i];

	if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
		mutex_lock(&vcn_inst->vcn_pg_lock);
		vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
		mutex_unlock(&vcn_inst->vcn_pg_lock);
		amdgpu_vcn_put_profile(adev);
	} else {
		schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
	}
}

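/**
 * amdgpu_vcn_ring_begin_use - power up VCN before a ring submission
 *
 * @ring: VCN ring about to be used
 *
 * Cancel the pending idle work, ungate the instance, update the DPG pause
 * state where applicable and request the video power profile.
 */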
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];

	atomic_inc(&vcn_inst->total_submission_cnt);

	cancel_delayed_work_sync(&vcn_inst->idle_work);

	mutex_lock(&vcn_inst->vcn_pg_lock);
	vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);

	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    !vcn_inst->using_unified_queue) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&vcn_inst->dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < vcn_inst->num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[i]);

			if (fences || atomic_read(&vcn_inst->dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
	}
	mutex_unlock(&vcn_inst->vcn_pg_lock);
	amdgpu_vcn_get_profile(adev);
}

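/**
 * amdgpu_vcn_ring_end_use - mark the end of a ring submission
 *
 * @ring: VCN ring that was used
 *
 * Drop the submission counters and schedule the delayed idle work that
 * will eventually gate the instance again.
 */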
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
	    !adev->vcn.inst[ring->me].using_unified_queue)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.inst[ring->me].total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work,
			      VCN_IDLE_TIMEOUT);
}

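/**
 * amdgpu_vcn_dec_ring_test_ring - test the decode ring
 *
 * @ring: VCN decode ring to test
 *
 * Write a token to the scratch register through the ring and poll for it
 * to read back. Skipped under SRIOV, where direct register access is not
 * supported.
 *
 * Returns: 0 on success, -ETIMEDOUT if the token never appears.
 */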
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned int i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib_msg,
				   struct dma_fence **fence)
{
	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     64, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->ptr[0] = PACKET0(adev->vcn.inst[ring->me].internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.inst[ring->me].internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.inst[ring->me].internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(ib_msg, f);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
		struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			AMDGPU_IB_POOL_DIRECT,
			ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			AMDGPU_IB_POOL_DIRECT,
			ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

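/**
 * amdgpu_vcn_dec_ring_test_ib - test an indirect buffer on the decode ring
 *
 * @ring: VCN decode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Submit a session create message followed by a session destroy message
 * and wait for the resulting fence.
 *
 * Returns: 0 on success, a negative error code or -ETIMEDOUT on failure.
 */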
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
						uint32_t ib_pack_in_dw, bool enc)
{
	uint32_t *ib_checksum;

	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
	ib->ptr[ib->length_dw++] = 0x30000002;
	ib_checksum = &ib->ptr[ib->length_dw++];
	ib->ptr[ib->length_dw++] = ib_pack_in_dw;

	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
	ib->ptr[ib->length_dw++] = 0x30000001;
	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);

	return ib_checksum;
}

static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
						uint32_t ib_pack_in_dw)
{
	uint32_t i;
	uint32_t checksum = 0;

	for (i = 0; i < ib_pack_in_dw; i++)
		checksum += *(*ib_checksum + 2 + i);

	**ib_checksum = checksum;
}

static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib_msg,
				      struct dma_fence **fence)
{
	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
	unsigned int ib_size_dw = 64;
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	uint32_t *ib_checksum;
	uint32_t ib_pack_in_dw;
	int i, r;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->length_dw = 0;

	/* single queue headers */
	if (adev->vcn.inst[ring->me].using_unified_queue) {
		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
						+ 4 + 2; /* engine info + decoding ib in dw */
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
	}

	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));

	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(ib_msg, f);
	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_ib *ib_msg,
					 struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	int i, r;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib_msg,
					  struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	int i, r;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
			AMDGPU_IB_POOL_DIRECT,
			&ib);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	amdgpu_ib_free(&ib, fence);
	dma_fence_put(fence);

	return r;
}

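/**
 * amdgpu_vcn_unified_ring_test_ib - test an indirect buffer on a unified ring
 *
 * @ring: VCN unified ring to test
 * @timeout: how long to wait for the fences, in jiffies
 *
 * Run the encode IB test (except on VCN 4.0.3 and 5.0.1) followed by the
 * software decode IB test.
 *
 * Returns: 0 on success, a negative error code or -ETIMEDOUT on failure.
 */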
int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	long r;

	if ((amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) &&
	    (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 1))) {
		r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
		if (r)
			goto error;
	}

	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);

error:
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

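/**
 * amdgpu_vcn_setup_ucode - register VCN firmware for PSP loading
 *
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * When firmware is loaded by the PSP, add the instance's firmware image
 * to the ucode list and account for its size.
 */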
void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i)
{
	unsigned int idx;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		if (adev->vcn.harvest_config & (1 << i))
			return;

		if ((amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 3) ||
		     amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(5, 0, 1))
		    && (i > 0))
			return;

		hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
		/* currently only support 2 FW instances */
		if (i >= 2) {
			dev_info(adev->dev, "More than 2 VCN FW instances!\n");
			return;
		}
		idx = AMDGPU_UCODE_ID_VCN + i;
		adev->firmware.ucode[idx].ucode_id = idx;
		adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
	}
}

/*
 * debugfs for reading the vcn firmware log buffer.
 */
#if defined(CONFIG_DEBUG_FS)
static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_vcn_inst *vcn;
	void *log_buf;
	volatile struct amdgpu_vcn_fwlog *plog;
	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
	unsigned int read_num[2] = {0};

	vcn = file_inode(f)->i_private;
	if (!vcn)
		return -ENODEV;

	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
		return -EFAULT;

	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;

	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
	read_pos = plog->rptr;
	write_pos = plog->wptr;

	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
		return -EFAULT;

	if (!size || (read_pos == write_pos))
		return 0;

	if (write_pos > read_pos) {
		available = write_pos - read_pos;
		read_num[0] = min_t(size_t, size, available);
	} else {
		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
		available = read_num[0] + write_pos - plog->header_size;
		if (size > available)
			read_num[1] = write_pos - plog->header_size;
		else if (size > read_num[0])
			read_num[1] = size - read_num[0];
		else
			read_num[0] = size;
	}

	for (i = 0; i < 2; i++) {
		if (read_num[i]) {
			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
				read_pos = plog->header_size;
			if (read_num[i] == copy_to_user((buf + read_bytes),
							(log_buf + read_pos), read_num[i]))
				return -EFAULT;

			read_bytes += read_num[i];
			read_pos += read_num[i];
		}
	}

	plog->rptr = read_pos;
	*pos += read_bytes;
	return read_bytes;
}

static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_vcn_fwlog_read,
	.llseek = default_llseek
};
#endif

void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
				   struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
	debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
				 &amdgpu_debugfs_vcnfwlog_fops,
				 AMDGPU_VCNFW_LOG_SIZE);
#endif
}

void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
							 + vcn->fw_shared.log_offset;
	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
	fw_log->is_enabled = 1;
	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);

	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
	log_buf->rptr = log_buf->header_size;
	log_buf->wptr = log_buf->header_size;
	log_buf->wrapped = 0;
#endif
}

int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->vcn.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	if (!amdgpu_sriov_vf(adev)) {
		ih_data.head = *ras_if;
		amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	} else {
		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
			adev->virt.ops->ras_poison_handler(adev, ras_if->block);
		else
			dev_warn(adev->dev,
				"No ras_poison_handler interface in SRIOV for VCN!\n");
	}

	return 0;
}

int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r, i;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i) ||
			    !adev->vcn.inst[i].ras_poison_irq.funcs)
				continue;

			r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
			if (r)
				goto late_fini;
		}
	}
	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_vcn_ras *ras;

	if (!adev->vcn.ras)
		return 0;

	ras = adev->vcn.ras;
	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register vcn ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "vcn");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
	adev->vcn.ras_if = &ras->ras_block.ras_comm;

	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;

	return 0;
}

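/**
 * amdgpu_vcn_psp_update_sram - load the DPG indirect SRAM through PSP
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: VCN instance index
 * @ucode_id: ucode ID to use, or 0 to select the VCN0/VCN1 RAM ID based on
 *            the instance index
 *
 * Returns: 0 on success, or a negative error code on failure.
 */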
int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
			       enum AMDGPU_UCODE_ID ucode_id)
{
	struct amdgpu_firmware_info ucode = {
		.ucode_id = (ucode_id ? ucode_id :
			    (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
					AMDGPU_UCODE_ID_VCN0_RAM)),
		.mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
		.ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
			      (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
	};

	return psp_execute_ip_fw_load(&adev->psp, &ucode);
}

static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (!adev)
		return -ENODEV;

	return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset);
}

static DEVICE_ATTR(vcn_reset_mask, 0444,
		   amdgpu_get_vcn_reset_mask, NULL);

int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->vcn.num_vcn_inst) {
		r = device_create_file(adev->dev, &dev_attr_vcn_reset_mask);
		if (r)
			return r;
	}

	return r;
}

void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
{
	if (adev->dev->kobj.sd) {
		if (adev->vcn.num_vcn_inst)
			device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
	}
}

/*
 * debugfs to enable/disable vcn job submission to a specific core or
 * instance. It is created only if the queue type is unified.
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_vcn_sched_mask_set(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u32 i;
	u64 mask;
	struct amdgpu_ring *ring;

	if (!adev)
		return -ENODEV;

	mask = (1ULL << adev->vcn.num_vcn_inst) - 1;
	if ((val & mask) == 0)
		return -EINVAL;
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ring = &adev->vcn.inst[i].ring_enc[0];
		if (val & (1ULL << i))
			ring->sched.ready = true;
		else
			ring->sched.ready = false;
	}
	/* publish the sched.ready flag updates so they take effect immediately across smp */
	smp_rmb();
	return 0;
}

static int amdgpu_debugfs_vcn_sched_mask_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u32 i;
	u64 mask = 0;
	struct amdgpu_ring *ring;

	if (!adev)
		return -ENODEV;
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ring = &adev->vcn.inst[i].ring_enc[0];
		if (ring->sched.ready)
			mask |= 1ULL << i;
	}
	*val = mask;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_vcn_sched_mask_fops,
			 amdgpu_debugfs_vcn_sched_mask_get,
			 amdgpu_debugfs_vcn_sched_mask_set, "%llx\n");
#endif

void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.inst[0].using_unified_queue)
		return;
	sprintf(name, "amdgpu_vcn_sched_mask");
	debugfs_create_file(name, 0600, root, adev,
			    &amdgpu_debugfs_vcn_sched_mask_fops);
#endif
}

1459 
1460 /**
1461  * vcn_set_powergating_state - set VCN block powergating state
1462  *
1463  * @ip_block: amdgpu_ip_block pointer
1464  * @state: power gating state
1465  *
1466  * Set VCN block powergating state
1467  */
1468 int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
1469 			      enum amd_powergating_state state)
1470 {
1471 	struct amdgpu_device *adev = ip_block->adev;
1472 	int ret = 0, i;
1473 
1474 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1475 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
1476 
1477 		ret |= vinst->set_pg_state(vinst, state);
1478 	}
1479 
1480 	return ret;
1481 }
1482 
1483 /**
1484  * amdgpu_vcn_reset_engine - Reset a specific VCN engine
1485  * @adev: Pointer to the AMDGPU device
1486  * @instance_id: VCN engine instance to reset
1487  *
1488  * Returns: 0 on success, or a negative error code on failure.
1489  */
1490 static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev,
1491 				   uint32_t instance_id)
1492 {
1493 	struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id];
1494 	int r, i;
1495 
1496 	mutex_lock(&vinst->engine_reset_mutex);
1497 	/* Stop the scheduler's work queue for the dec and enc rings if they are running.
1498 	 * This ensures that no new tasks are submitted to the queues while
1499 	 * the reset is in progress.
1500 	 */
1501 	drm_sched_wqueue_stop(&vinst->ring_dec.sched);
1502 	for (i = 0; i < vinst->num_enc_rings; i++)
1503 		drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);
1504 
1505 	/* Perform the VCN reset for the specified instance */
1506 	r = vinst->reset(vinst);
1507 	if (r)
1508 		goto unlock;
1509 	r = amdgpu_ring_test_ring(&vinst->ring_dec);
1510 	if (r)
1511 		goto unlock;
1512 	for (i = 0; i < vinst->num_enc_rings; i++) {
1513 		r = amdgpu_ring_test_ring(&vinst->ring_enc[i]);
1514 		if (r)
1515 			goto unlock;
1516 	}
1517 	amdgpu_fence_driver_force_completion(&vinst->ring_dec);
1518 	for (i = 0; i < vinst->num_enc_rings; i++)
1519 		amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]);
1520 
1521 	/* Restart the scheduler's work queue for the dec and enc rings
1522 	 * if they were stopped by this function. This allows new tasks
1523 	 * to be submitted to the queues after the reset is complete.
1524 	 */
1525 	drm_sched_wqueue_start(&vinst->ring_dec.sched);
1526 	for (i = 0; i < vinst->num_enc_rings; i++)
1527 		drm_sched_wqueue_start(&vinst->ring_enc[i].sched);
1528 
1529 unlock:
1530 	mutex_unlock(&vinst->engine_reset_mutex);
1531 
1532 	return r;
1533 }
1534 
1535 /**
1536  * amdgpu_vcn_ring_reset - Reset a VCN ring
1537  * @ring: ring to reset
1538  * @vmid: vmid of guilty job
1539  * @timedout_fence: fence of timed out job
1540  *
1541  * This helper is for VCN blocks without unified queues because
1542  * resetting the engine resets all queues in that case.  With
1543  * unified queues we have one queue per engine.
1544  * Returns: 0 on success, or a negative error code on failure.
1545  */
1546 int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
1547 			  unsigned int vmid,
1548 			  struct amdgpu_fence *timedout_fence)
1549 {
1550 	struct amdgpu_device *adev = ring->adev;
1551 
1552 	if (adev->vcn.inst[ring->me].using_unified_queue)
1553 		return -EINVAL;
1554 
1555 	return amdgpu_vcn_reset_engine(adev, ring->me);
1556 }
1557 
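/**
 * amdgpu_vcn_reg_dump_init - allocate the register dump buffer
 *
 * @adev: amdgpu_device pointer
 * @reg: register list to capture on dump
 * @count: number of entries in @reg
 *
 * Returns: 0 on success, -ENOMEM if the dump buffer cannot be allocated.
 */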
int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
			     const struct amdgpu_hwip_reg_entry *reg, u32 count)
{
	adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
				    sizeof(uint32_t), GFP_KERNEL);
	if (!adev->vcn.ip_dump)
		return -ENOMEM;
	adev->vcn.reg_list = reg;
	adev->vcn.reg_count = count;

	return 0;
}

static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev)
{
	kfree(adev->vcn.ip_dump);
	adev->vcn.ip_dump = NULL;
	adev->vcn.reg_list = NULL;
	adev->vcn.reg_count = 0;
}

void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	u32 inst_off;

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * adev->vcn.reg_count;
		/* mmUVD_POWER_STATUS is always readable and is the first in reg_list */
		adev->vcn.ip_dump[inst_off] =
			RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i));
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
			      UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;

		if (is_powered)
			for (j = 1; j < adev->vcn.reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
	}
}

void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	u32 inst_off;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * adev->vcn.reg_count;
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
			      UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < adev->vcn.reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name,
					   adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}