xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c (revision 92c4c9fdc838d3b41a996bb700ea64b9e78fc7ea)
1 /*
2  * Copyright 2016-2024 Advanced Micro Devices, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 
27 #include <linux/firmware.h>
28 #include <linux/module.h>
29 #include <linux/dmi.h>
30 #include <linux/pci.h>
31 #include <linux/debugfs.h>
32 #include <drm/drm_drv.h>
33 
34 #include "amdgpu.h"
35 #include "amdgpu_pm.h"
36 #include "amdgpu_vcn.h"
37 #include "amdgpu_reset.h"
38 #include "soc15d.h"
39 
40 /* Firmware Names */
41 #define FIRMWARE_RAVEN			"amdgpu/raven_vcn.bin"
42 #define FIRMWARE_PICASSO		"amdgpu/picasso_vcn.bin"
43 #define FIRMWARE_RAVEN2			"amdgpu/raven2_vcn.bin"
44 #define FIRMWARE_ARCTURUS		"amdgpu/arcturus_vcn.bin"
45 #define FIRMWARE_RENOIR			"amdgpu/renoir_vcn.bin"
46 #define FIRMWARE_GREEN_SARDINE		"amdgpu/green_sardine_vcn.bin"
47 #define FIRMWARE_NAVI10			"amdgpu/navi10_vcn.bin"
48 #define FIRMWARE_NAVI14			"amdgpu/navi14_vcn.bin"
49 #define FIRMWARE_NAVI12			"amdgpu/navi12_vcn.bin"
50 #define FIRMWARE_SIENNA_CICHLID		"amdgpu/sienna_cichlid_vcn.bin"
51 #define FIRMWARE_NAVY_FLOUNDER		"amdgpu/navy_flounder_vcn.bin"
52 #define FIRMWARE_VANGOGH		"amdgpu/vangogh_vcn.bin"
53 #define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
54 #define FIRMWARE_ALDEBARAN		"amdgpu/aldebaran_vcn.bin"
55 #define FIRMWARE_BEIGE_GOBY		"amdgpu/beige_goby_vcn.bin"
56 #define FIRMWARE_YELLOW_CARP		"amdgpu/yellow_carp_vcn.bin"
57 #define FIRMWARE_VCN_3_1_2		"amdgpu/vcn_3_1_2.bin"
58 #define FIRMWARE_VCN4_0_0		"amdgpu/vcn_4_0_0.bin"
59 #define FIRMWARE_VCN4_0_2		"amdgpu/vcn_4_0_2.bin"
60 #define FIRMWARE_VCN4_0_3		"amdgpu/vcn_4_0_3.bin"
61 #define FIRMWARE_VCN4_0_4		"amdgpu/vcn_4_0_4.bin"
62 #define FIRMWARE_VCN4_0_5		"amdgpu/vcn_4_0_5.bin"
63 #define FIRMWARE_VCN4_0_6		"amdgpu/vcn_4_0_6.bin"
64 #define FIRMWARE_VCN4_0_6_1		"amdgpu/vcn_4_0_6_1.bin"
65 #define FIRMWARE_VCN5_0_0		"amdgpu/vcn_5_0_0.bin"
66 #define FIRMWARE_VCN5_0_1		"amdgpu/vcn_5_0_1.bin"
67 #define FIRMWARE_VCN5_0_2		"amdgpu/vcn_5_0_2.bin"
68 #define FIRMWARE_VCN5_3_0		"amdgpu/vcn_5_3_0.bin"
69 
70 MODULE_FIRMWARE(FIRMWARE_RAVEN);
71 MODULE_FIRMWARE(FIRMWARE_PICASSO);
72 MODULE_FIRMWARE(FIRMWARE_RAVEN2);
73 MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
74 MODULE_FIRMWARE(FIRMWARE_RENOIR);
75 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
76 MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
77 MODULE_FIRMWARE(FIRMWARE_NAVI10);
78 MODULE_FIRMWARE(FIRMWARE_NAVI14);
79 MODULE_FIRMWARE(FIRMWARE_NAVI12);
80 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
81 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
82 MODULE_FIRMWARE(FIRMWARE_VANGOGH);
83 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
84 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
85 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
86 MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
87 MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
88 MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
89 MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
90 MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
91 MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
92 MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
93 MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
94 MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
95 MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
96 MODULE_FIRMWARE(FIRMWARE_VCN5_0_2);
97 MODULE_FIRMWARE(FIRMWARE_VCN5_3_0);
98 
99 static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
100 static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);
101 
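/**
 * amdgpu_vcn_early_init - request VCN firmware for an instance
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Decode the ucode prefix for the UVD/VCN IP and request the matching
 * firmware image.  When per-instance firmware is not used, instance 0's
 * firmware is requested once and shared by the other instances.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */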
102 int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
103 {
104 	char ucode_prefix[25];
105 	int r;
106 
107 	adev->vcn.inst[i].adev = adev;
108 	adev->vcn.inst[i].inst = i;
109 	amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
110 
111 	if (i != 0 && adev->vcn.per_inst_fw) {
112 		r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
113 					 AMDGPU_UCODE_REQUIRED,
114 					 "amdgpu/%s_%d.bin", ucode_prefix, i);
115 		if (r)
116 			amdgpu_ucode_release(&adev->vcn.inst[i].fw);
117 	} else {
118 		if (!adev->vcn.inst[0].fw) {
119 			r = amdgpu_ucode_request(adev, &adev->vcn.inst[0].fw,
120 						 AMDGPU_UCODE_REQUIRED,
121 						 "amdgpu/%s.bin", ucode_prefix);
122 			if (r)
123 				amdgpu_ucode_release(&adev->vcn.inst[0].fw);
124 		} else {
125 			r = 0;
126 		}
127 		adev->vcn.inst[i].fw = adev->vcn.inst[0].fw;
128 	}
129 
130 	return r;
131 }
132 
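/**
 * amdgpu_vcn_sw_init - software init for a VCN instance
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Initialize locks, the idle work handler and submission counters, parse the
 * firmware header to report the firmware version, and allocate the VCPU BO
 * (including the fw_shared region, the optional firmware log and, for DPG
 * mode, the indirect SRAM buffer).
 *
 * Returns: 0 on success, or a negative error code on failure.
 */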
133 int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
134 {
135 	unsigned long bo_size;
136 	const struct common_firmware_header *hdr;
137 	unsigned char fw_check;
138 	unsigned int fw_shared_size, log_offset;
139 	int r;
140 
141 	mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
142 	mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
143 	mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
144 	atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
145 	INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
146 	atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
147 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
148 	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
149 		adev->vcn.inst[i].indirect_sram = true;
150 
151 	/*
152 	 * Some Steam Deck BIOS versions are incompatible with the
153 	 * indirect SRAM mode, leading to amdgpu being unable to get
154 	 * properly probed (and even potentially crashing the kernel).
155 	 * Hence, check for these versions here - notice this is
156 	 * restricted to Vangogh (Deck's APU).
157 	 */
158 	if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {
159 		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
160 
161 		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
162 				 !strncmp("F7A0114", bios_ver, 7))) {
163 			adev->vcn.inst[i].indirect_sram = false;
164 			dev_info(adev->dev,
165 				 "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
166 		}
167 	}
168 
169 	/* from vcn4 and above, only unified queue is used */
170 	adev->vcn.inst[i].using_unified_queue =
171 		amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
172 
173 	hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
174 	adev->vcn.inst[i].fw_version = le32_to_cpu(hdr->ucode_version);
175 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
176 
177 	/* Bits 20-23 hold the encode major version and are non-zero in the new
178 	 * naming convention. In the old naming convention this field is part of
179 	 * the version minor and DRM_DISABLED_FLAG. Since the latest version minor
180 	 * is 0x5B and DRM_DISABLED_FLAG is zero in the old convention, this field
181 	 * is always zero so far, so these four bits tell which convention is used.
182 	 */
183 	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
184 	if (fw_check) {
185 		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
186 
187 		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
188 		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
189 		enc_major = fw_check;
190 		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
191 		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
192 		dev_info(adev->dev,
193 			 "[VCN instance %d] Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
194 			 i, enc_major, enc_minor, dec_ver, vep, fw_rev);
195 	} else {
196 		unsigned int version_major, version_minor, family_id;
197 
198 		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
199 		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
200 		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
201 		dev_info(adev->dev, "[VCN instance %d] Found VCN firmware Version: %u.%u Family ID: %u\n",
202 			 i, version_major, version_minor, family_id);
203 	}
204 
205 	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
206 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
207 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
208 
209 	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(5, 0, 0)) {
210 		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared));
211 		log_offset = offsetof(struct amdgpu_vcn5_fw_shared, fw_log);
212 	} else if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
213 		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
214 		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
215 	} else {
216 		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
217 		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
218 	}
219 
220 	bo_size += fw_shared_size;
221 
222 	if (amdgpu_vcnfw_log)
223 		bo_size += AMDGPU_VCNFW_LOG_SIZE;
224 
225 	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
226 				    AMDGPU_GEM_DOMAIN_VRAM |
227 				    AMDGPU_GEM_DOMAIN_GTT,
228 				    &adev->vcn.inst[i].vcpu_bo,
229 				    &adev->vcn.inst[i].gpu_addr,
230 				    &adev->vcn.inst[i].cpu_addr);
231 	if (r) {
232 		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
233 		return r;
234 	}
235 
236 	adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
237 		bo_size - fw_shared_size;
238 	adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
239 		bo_size - fw_shared_size;
240 
241 	adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
242 
243 	if (amdgpu_vcnfw_log) {
244 		adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
245 		adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
246 		adev->vcn.inst[i].fw_shared.log_offset = log_offset;
247 	}
248 
249 	if (adev->vcn.inst[i].indirect_sram) {
250 		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
251 					    AMDGPU_GEM_DOMAIN_VRAM |
252 					    AMDGPU_GEM_DOMAIN_GTT,
253 					    &adev->vcn.inst[i].dpg_sram_bo,
254 					    &adev->vcn.inst[i].dpg_sram_gpu_addr,
255 					    &adev->vcn.inst[i].dpg_sram_cpu_addr);
256 		if (r) {
257 			dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
258 			return r;
259 		}
260 	}
261 
262 	return 0;
263 }
264 
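/**
 * amdgpu_vcn_sw_fini - software fini for a VCN instance
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Free the DPG SRAM and VCPU BOs, the saved VCPU backup, the rings and the
 * firmware reference taken in amdgpu_vcn_early_init()/amdgpu_vcn_sw_init().
 */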
265 void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
266 {
267 	int j;
268 
269 	if (adev->vcn.harvest_config & (1 << i))
270 		return;
271 
272 	amdgpu_bo_free_kernel(
273 		&adev->vcn.inst[i].dpg_sram_bo,
274 		&adev->vcn.inst[i].dpg_sram_gpu_addr,
275 		(void **)&adev->vcn.inst[i].dpg_sram_cpu_addr);
276 
277 	kvfree(adev->vcn.inst[i].saved_bo);
278 
279 	amdgpu_bo_free_kernel(&adev->vcn.inst[i].vcpu_bo,
280 			      &adev->vcn.inst[i].gpu_addr,
281 			      (void **)&adev->vcn.inst[i].cpu_addr);
282 
283 	amdgpu_ring_fini(&adev->vcn.inst[i].ring_dec);
284 
285 	for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
286 		amdgpu_ring_fini(&adev->vcn.inst[i].ring_enc[j]);
287 
288 	if (adev->vcn.per_inst_fw) {
289 		amdgpu_ucode_release(&adev->vcn.inst[i].fw);
290 	} else {
291 		amdgpu_ucode_release(&adev->vcn.inst[0].fw);
292 		adev->vcn.inst[i].fw = NULL;
293 	}
294 
295 	if (adev->vcn.reg_list)
296 		amdgpu_vcn_reg_dump_fini(adev);
297 
298 	mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
299 	mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
300 }
301 
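/**
 * amdgpu_vcn_is_disabled_vcn - check if a ring type is disabled on an instance
 * @adev: amdgpu_device pointer
 * @type: VCN ring type (decode, encode or unified)
 * @vcn_instance: VCN instance index
 *
 * Returns: true if the given ring type is marked disabled in the instance's
 * vcn_config harvest information, false otherwise.
 */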
302 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
303 {
304 	bool ret = false;
305 	int vcn_config = adev->vcn.inst[vcn_instance].vcn_config;
306 
307 	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
308 		ret = true;
309 	else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
310 		ret = true;
311 	else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
312 		ret = true;
313 
314 	return ret;
315 }
316 
317 static int amdgpu_vcn_save_vcpu_bo_inst(struct amdgpu_device *adev, int i)
318 {
319 	unsigned int size;
320 	void *ptr;
321 	int idx;
322 
323 	if (adev->vcn.harvest_config & (1 << i))
324 		return 0;
325 	if (adev->vcn.inst[i].vcpu_bo == NULL)
326 		return 0;
327 
328 	size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
329 	ptr = adev->vcn.inst[i].cpu_addr;
330 
331 	adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
332 	if (!adev->vcn.inst[i].saved_bo)
333 		return -ENOMEM;
334 
335 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
336 		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
337 		drm_dev_exit(idx);
338 	}
339 
340 	return 0;
341 }
342 
343 int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
344 {
345 	int ret, i;
346 
347 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
348 		ret = amdgpu_vcn_save_vcpu_bo_inst(adev, i);
349 		if (ret)
350 			return ret;
351 	}
352 
353 	return 0;
354 }
355 
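/**
 * amdgpu_vcn_suspend - suspend a VCN instance
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Back up the VCPU BO contents to system memory so they can be restored in
 * amdgpu_vcn_resume().  Skipped during RAS interrupt or DPC recovery, where
 * the buffer is known to be corrupted and is rebuilt from firmware instead.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */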
356 int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
357 {
358 	bool in_ras_intr = amdgpu_ras_intr_triggered();
359 
360 	if (adev->vcn.harvest_config & (1 << i))
361 		return 0;
362 
363 	/* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
364 	 * restore fw data and clear buffer in amdgpu_vcn_resume() */
365 	if (in_ras_intr || amdgpu_reset_in_dpc(adev))
366 		return 0;
367 
368 	return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
369 }
370 
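/**
 * amdgpu_vcn_resume - resume a VCN instance
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Restore the VCPU BO from the backup taken at suspend time, or, if no backup
 * exists, reload the firmware image (for non-PSP loading) and clear the rest
 * of the buffer.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */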
371 int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
372 {
373 	unsigned int size;
374 	void *ptr;
375 	int idx;
376 
377 	if (adev->vcn.harvest_config & (1 << i))
378 		return 0;
379 	if (adev->vcn.inst[i].vcpu_bo == NULL)
380 		return -EINVAL;
381 
382 	size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
383 	ptr = adev->vcn.inst[i].cpu_addr;
384 
385 	if (adev->vcn.inst[i].saved_bo != NULL) {
386 		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
387 			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
388 			drm_dev_exit(idx);
389 		}
390 		kvfree(adev->vcn.inst[i].saved_bo);
391 		adev->vcn.inst[i].saved_bo = NULL;
392 	} else {
393 		const struct common_firmware_header *hdr;
394 		unsigned int offset;
395 
396 		hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
397 		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
398 			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
399 			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
400 				memcpy_toio(adev->vcn.inst[i].cpu_addr,
401 					    adev->vcn.inst[i].fw->data + offset,
402 					    le32_to_cpu(hdr->ucode_size_bytes));
403 				drm_dev_exit(idx);
404 			}
405 			size -= le32_to_cpu(hdr->ucode_size_bytes);
406 			ptr += le32_to_cpu(hdr->ucode_size_bytes);
407 		}
408 		memset_io(ptr, 0, size);
409 	}
410 
411 	return 0;
412 }
413 
414 void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
415 {
416 	int r;
417 
418 	mutex_lock(&adev->vcn.workload_profile_mutex);
419 
420 	if (adev->vcn.workload_profile_active) {
421 		mutex_unlock(&adev->vcn.workload_profile_mutex);
422 		return;
423 	}
424 	r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
425 					    true);
426 	if (r)
427 		dev_warn(adev->dev,
428 			 "(%d) failed to enable video power profile mode\n", r);
429 	else
430 		adev->vcn.workload_profile_active = true;
431 	mutex_unlock(&adev->vcn.workload_profile_mutex);
432 }
433 
434 void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
435 {
436 	bool pg = true;
437 	int r, i;
438 
439 	mutex_lock(&adev->vcn.workload_profile_mutex);
440 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
441 		if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) {
442 			pg = false;
443 			break;
444 		}
445 	}
446 
447 	if (pg) {
448 		r = amdgpu_dpm_switch_power_profile(
449 			adev, PP_SMC_POWER_PROFILE_VIDEO, false);
450 		if (r)
451 			dev_warn(
452 				adev->dev,
453 				"(%d) failed to disable video power profile mode\n",
454 				r);
455 		else
456 			adev->vcn.workload_profile_active = false;
457 	}
458 
459 	mutex_unlock(&adev->vcn.workload_profile_mutex);
460 }
461 
462 static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
463 {
464 	struct amdgpu_vcn_inst *vcn_inst =
465 		container_of(work, struct amdgpu_vcn_inst, idle_work.work);
466 	struct amdgpu_device *adev = vcn_inst->adev;
467 	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
468 	unsigned int i = vcn_inst->inst, j;
469 
470 	if (adev->vcn.harvest_config & (1 << i))
471 		return;
472 
473 	for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
474 		fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[j]);
475 
476 	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
477 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
478 	    !adev->vcn.inst[i].using_unified_queue) {
479 		struct dpg_pause_state new_state;
480 
481 		if (fence[i] ||
482 		    unlikely(atomic_read(&vcn_inst->dpg_enc_submission_cnt)))
483 			new_state.fw_based = VCN_DPG_STATE__PAUSE;
484 		else
485 			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
486 
487 		adev->vcn.inst[i].pause_dpg_mode(vcn_inst, &new_state);
488 	}
489 
490 	fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
491 	fences += fence[i];
492 
493 	if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
494 		mutex_lock(&vcn_inst->vcn_pg_lock);
495 		vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
496 		mutex_unlock(&vcn_inst->vcn_pg_lock);
497 		amdgpu_vcn_put_profile(adev);
498 
499 	} else {
500 		schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
501 	}
502 }
503 
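/**
 * amdgpu_vcn_ring_begin_use - power up VCN before a ring submission
 * @ring: ring about to be used
 *
 * Cancel the pending idle work, ungate the VCN instance and, for pre-VCN4
 * DPG mode, update the DPG pause state based on outstanding encode work.
 * Also switches to the video power profile.
 */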
504 void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
505 {
506 	struct amdgpu_device *adev = ring->adev;
507 	struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
508 
509 	atomic_inc(&vcn_inst->total_submission_cnt);
510 
511 	cancel_delayed_work_sync(&vcn_inst->idle_work);
512 
513 	mutex_lock(&vcn_inst->vcn_pg_lock);
514 	vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
515 
516 	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
517 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
518 	    !vcn_inst->using_unified_queue) {
519 		struct dpg_pause_state new_state;
520 
521 		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
522 			atomic_inc(&vcn_inst->dpg_enc_submission_cnt);
523 			new_state.fw_based = VCN_DPG_STATE__PAUSE;
524 		} else {
525 			unsigned int fences = 0;
526 			unsigned int i;
527 
528 			for (i = 0; i < vcn_inst->num_enc_rings; ++i)
529 				fences += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[i]);
530 
531 			if (fences || atomic_read(&vcn_inst->dpg_enc_submission_cnt))
532 				new_state.fw_based = VCN_DPG_STATE__PAUSE;
533 			else
534 				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
535 		}
536 
537 		vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
538 	}
539 	mutex_unlock(&vcn_inst->vcn_pg_lock);
540 	amdgpu_vcn_get_profile(adev);
541 }
542 
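/**
 * amdgpu_vcn_ring_end_use - mark the end of a ring submission
 * @ring: ring that was used
 *
 * Drop the submission counters taken in amdgpu_vcn_ring_begin_use() and
 * re-arm the delayed idle work that will eventually gate the instance.
 */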
543 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
544 {
545 	struct amdgpu_device *adev = ring->adev;
546 
547 	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
548 	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
549 	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
550 	    !adev->vcn.inst[ring->me].using_unified_queue)
551 		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
552 
553 	atomic_dec(&ring->adev->vcn.inst[ring->me].total_submission_cnt);
554 
555 	schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work,
556 			      VCN_IDLE_TIMEOUT);
557 }
558 
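/**
 * amdgpu_vcn_dec_ring_test_ring - basic decode ring test
 * @ring: decode ring to test
 *
 * Write a known value to the scratch register through the ring and poll the
 * register until it reads back, to verify the ring is alive.  Skipped under
 * SR-IOV, where direct register access is not available.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */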
559 int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
560 {
561 	struct amdgpu_device *adev = ring->adev;
562 	uint32_t tmp = 0;
563 	unsigned int i;
564 	int r;
565 
566 	/* VCN in SRIOV does not support direct register read/write */
567 	if (amdgpu_sriov_vf(adev))
568 		return 0;
569 
570 	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
571 	r = amdgpu_ring_alloc(ring, 3);
572 	if (r)
573 		return r;
574 	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
575 	amdgpu_ring_write(ring, 0xDEADBEEF);
576 	amdgpu_ring_commit(ring);
577 	for (i = 0; i < adev->usec_timeout; i++) {
578 		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
579 		if (tmp == 0xDEADBEEF)
580 			break;
581 		udelay(1);
582 	}
583 
584 	if (i >= adev->usec_timeout)
585 		r = -ETIMEDOUT;
586 
587 	return r;
588 }
589 
590 int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
591 {
592 	struct amdgpu_device *adev = ring->adev;
593 	uint32_t rptr;
594 	unsigned int i;
595 	int r;
596 
597 	if (amdgpu_sriov_vf(adev))
598 		return 0;
599 
600 	r = amdgpu_ring_alloc(ring, 16);
601 	if (r)
602 		return r;
603 
604 	rptr = amdgpu_ring_get_rptr(ring);
605 
606 	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
607 	amdgpu_ring_commit(ring);
608 
609 	for (i = 0; i < adev->usec_timeout; i++) {
610 		if (amdgpu_ring_get_rptr(ring) != rptr)
611 			break;
612 		udelay(1);
613 	}
614 
615 	if (i >= adev->usec_timeout)
616 		r = -ETIMEDOUT;
617 
618 	return r;
619 }
620 
621 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
622 				   struct amdgpu_ib *ib_msg,
623 				   struct dma_fence **fence)
624 {
625 	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
626 	struct amdgpu_device *adev = ring->adev;
627 	struct dma_fence *f = NULL;
628 	struct amdgpu_job *job;
629 	struct amdgpu_ib *ib;
630 	int i, r;
631 
632 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
633 				     64, AMDGPU_IB_POOL_DIRECT,
634 				     &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
635 	if (r)
636 		goto err;
637 
638 	ib = &job->ibs[0];
639 	ib->ptr[0] = PACKET0(adev->vcn.inst[ring->me].internal.data0, 0);
640 	ib->ptr[1] = addr;
641 	ib->ptr[2] = PACKET0(adev->vcn.inst[ring->me].internal.data1, 0);
642 	ib->ptr[3] = addr >> 32;
643 	ib->ptr[4] = PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0);
644 	ib->ptr[5] = 0;
645 	for (i = 6; i < 16; i += 2) {
646 		ib->ptr[i] = PACKET0(adev->vcn.inst[ring->me].internal.nop, 0);
647 		ib->ptr[i+1] = 0;
648 	}
649 	ib->length_dw = 16;
650 
651 	r = amdgpu_job_submit_direct(job, ring, &f);
652 	if (r)
653 		goto err_free;
654 
655 	amdgpu_ib_free(ib_msg, f);
656 
657 	if (fence)
658 		*fence = dma_fence_get(f);
659 	dma_fence_put(f);
660 
661 	return 0;
662 
663 err_free:
664 	amdgpu_job_free(job);
665 err:
666 	amdgpu_ib_free(ib_msg, f);
667 	return r;
668 }
669 
670 static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
671 		struct amdgpu_ib *ib)
672 {
673 	struct amdgpu_device *adev = ring->adev;
674 	uint32_t *msg;
675 	int r, i;
676 
677 	memset(ib, 0, sizeof(*ib));
678 	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
679 			AMDGPU_IB_POOL_DIRECT,
680 			ib);
681 	if (r)
682 		return r;
683 
684 	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
685 	msg[0] = cpu_to_le32(0x00000028);
686 	msg[1] = cpu_to_le32(0x00000038);
687 	msg[2] = cpu_to_le32(0x00000001);
688 	msg[3] = cpu_to_le32(0x00000000);
689 	msg[4] = cpu_to_le32(handle);
690 	msg[5] = cpu_to_le32(0x00000000);
691 	msg[6] = cpu_to_le32(0x00000001);
692 	msg[7] = cpu_to_le32(0x00000028);
693 	msg[8] = cpu_to_le32(0x00000010);
694 	msg[9] = cpu_to_le32(0x00000000);
695 	msg[10] = cpu_to_le32(0x00000007);
696 	msg[11] = cpu_to_le32(0x00000000);
697 	msg[12] = cpu_to_le32(0x00000780);
698 	msg[13] = cpu_to_le32(0x00000440);
699 	for (i = 14; i < 1024; ++i)
700 		msg[i] = cpu_to_le32(0x0);
701 
702 	return 0;
703 }
704 
705 static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
706 					  struct amdgpu_ib *ib)
707 {
708 	struct amdgpu_device *adev = ring->adev;
709 	uint32_t *msg;
710 	int r, i;
711 
712 	memset(ib, 0, sizeof(*ib));
713 	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
714 			AMDGPU_IB_POOL_DIRECT,
715 			ib);
716 	if (r)
717 		return r;
718 
719 	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
720 	msg[0] = cpu_to_le32(0x00000028);
721 	msg[1] = cpu_to_le32(0x00000018);
722 	msg[2] = cpu_to_le32(0x00000000);
723 	msg[3] = cpu_to_le32(0x00000002);
724 	msg[4] = cpu_to_le32(handle);
725 	msg[5] = cpu_to_le32(0x00000000);
726 	for (i = 6; i < 1024; ++i)
727 		msg[i] = cpu_to_le32(0x0);
728 
729 	return 0;
730 }
731 
732 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
733 {
734 	struct dma_fence *fence = NULL;
735 	struct amdgpu_ib ib;
736 	long r;
737 
738 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
739 	if (r)
740 		goto error;
741 
742 	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
743 	if (r)
744 		goto error;
745 	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
746 	if (r)
747 		goto error;
748 
749 	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
750 	if (r)
751 		goto error;
752 
753 	r = dma_fence_wait_timeout(fence, false, timeout);
754 	if (r == 0)
755 		r = -ETIMEDOUT;
756 	else if (r > 0)
757 		r = 0;
758 
759 	dma_fence_put(fence);
760 error:
761 	return r;
762 }
763 
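/**
 * amdgpu_vcn_unified_ring_ib_header - emit the unified queue IB header
 * @ib: IB being built
 * @ib_pack_in_dw: size of the packed payload in dwords
 * @enc: true for an encode payload, false for decode
 *
 * Write the single-queue checksum and engine-info packets expected by the
 * unified queue firmware.  The checksum dword itself is left for
 * amdgpu_vcn_unified_ring_ib_checksum() to fill once the payload is written.
 *
 * Returns: pointer to the checksum dword inside the IB.
 */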
764 static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
765 						uint32_t ib_pack_in_dw, bool enc)
766 {
767 	uint32_t *ib_checksum;
768 
769 	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
770 	ib->ptr[ib->length_dw++] = 0x30000002;
771 	ib_checksum = &ib->ptr[ib->length_dw++];
772 	ib->ptr[ib->length_dw++] = ib_pack_in_dw;
773 
774 	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
775 	ib->ptr[ib->length_dw++] = 0x30000001;
776 	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
777 	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
778 
779 	return ib_checksum;
780 }
781 
782 static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
783 						uint32_t ib_pack_in_dw)
784 {
785 	uint32_t i;
786 	uint32_t checksum = 0;
787 
788 	for (i = 0; i < ib_pack_in_dw; i++)
789 		checksum += *(*ib_checksum + 2 + i);
790 
791 	**ib_checksum = checksum;
792 }
793 
794 static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
795 				      struct amdgpu_ib *ib_msg,
796 				      struct dma_fence **fence)
797 {
798 	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
799 	unsigned int ib_size_dw = 64;
800 	struct amdgpu_device *adev = ring->adev;
801 	struct dma_fence *f = NULL;
802 	struct amdgpu_job *job;
803 	struct amdgpu_ib *ib;
804 	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
805 	uint32_t *ib_checksum;
806 	uint32_t ib_pack_in_dw;
807 	int i, r;
808 
809 	if (adev->vcn.inst[ring->me].using_unified_queue)
810 		ib_size_dw += 8;
811 
812 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
813 				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
814 				     &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
815 	if (r)
816 		goto err;
817 
818 	ib = &job->ibs[0];
819 	ib->length_dw = 0;
820 
821 	/* single queue headers */
822 	if (adev->vcn.inst[ring->me].using_unified_queue) {
823 		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
824 						+ 4 + 2; /* engine info + decoding ib in dw */
825 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
826 	}
827 
828 	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
829 	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
830 	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
831 	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
832 	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
833 
834 	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
835 	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
836 	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
837 
838 	for (i = ib->length_dw; i < ib_size_dw; ++i)
839 		ib->ptr[i] = 0x0;
840 
841 	if (adev->vcn.inst[ring->me].using_unified_queue)
842 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
843 
844 	r = amdgpu_job_submit_direct(job, ring, &f);
845 	if (r)
846 		goto err_free;
847 
848 	amdgpu_ib_free(ib_msg, f);
849 
850 	if (fence)
851 		*fence = dma_fence_get(f);
852 	dma_fence_put(f);
853 
854 	return 0;
855 
856 err_free:
857 	amdgpu_job_free(job);
858 err:
859 	amdgpu_ib_free(ib_msg, f);
860 	return r;
861 }
862 
863 int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
864 {
865 	struct dma_fence *fence = NULL;
866 	struct amdgpu_ib ib;
867 	long r;
868 
869 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
870 	if (r)
871 		goto error;
872 
873 	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
874 	if (r)
875 		goto error;
876 	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
877 	if (r)
878 		goto error;
879 
880 	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
881 	if (r)
882 		goto error;
883 
884 	r = dma_fence_wait_timeout(fence, false, timeout);
885 	if (r == 0)
886 		r = -ETIMEDOUT;
887 	else if (r > 0)
888 		r = 0;
889 
890 	dma_fence_put(fence);
891 error:
892 	return r;
893 }
894 
895 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
896 {
897 	struct amdgpu_device *adev = ring->adev;
898 	uint32_t rptr;
899 	unsigned int i;
900 	int r;
901 
902 	if (amdgpu_sriov_vf(adev))
903 		return 0;
904 
905 	r = amdgpu_ring_alloc(ring, 16);
906 	if (r)
907 		return r;
908 
909 	rptr = amdgpu_ring_get_rptr(ring);
910 
911 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
912 	amdgpu_ring_commit(ring);
913 
914 	for (i = 0; i < adev->usec_timeout; i++) {
915 		if (amdgpu_ring_get_rptr(ring) != rptr)
916 			break;
917 		udelay(1);
918 	}
919 
920 	if (i >= adev->usec_timeout)
921 		r = -ETIMEDOUT;
922 
923 	return r;
924 }
925 
926 static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
927 					 struct amdgpu_ib *ib_msg,
928 					 struct dma_fence **fence)
929 {
930 	unsigned int ib_size_dw = 16;
931 	struct amdgpu_device *adev = ring->adev;
932 	struct amdgpu_job *job;
933 	struct amdgpu_ib *ib;
934 	struct dma_fence *f = NULL;
935 	uint32_t *ib_checksum = NULL;
936 	uint64_t addr;
937 	int i, r;
938 
939 	if (adev->vcn.inst[ring->me].using_unified_queue)
940 		ib_size_dw += 8;
941 
942 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
943 				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
944 				     &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
945 	if (r)
946 		return r;
947 
948 	ib = &job->ibs[0];
949 	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
950 
951 	ib->length_dw = 0;
952 
953 	if (adev->vcn.inst[ring->me].using_unified_queue)
954 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
955 
956 	ib->ptr[ib->length_dw++] = 0x00000018;
957 	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
958 	ib->ptr[ib->length_dw++] = handle;
959 	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
960 	ib->ptr[ib->length_dw++] = addr;
961 	ib->ptr[ib->length_dw++] = 0x00000000;
962 
963 	ib->ptr[ib->length_dw++] = 0x00000014;
964 	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
965 	ib->ptr[ib->length_dw++] = 0x0000001c;
966 	ib->ptr[ib->length_dw++] = 0x00000000;
967 	ib->ptr[ib->length_dw++] = 0x00000000;
968 
969 	ib->ptr[ib->length_dw++] = 0x00000008;
970 	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
971 
972 	for (i = ib->length_dw; i < ib_size_dw; ++i)
973 		ib->ptr[i] = 0x0;
974 
975 	if (adev->vcn.inst[ring->me].using_unified_queue)
976 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
977 
978 	r = amdgpu_job_submit_direct(job, ring, &f);
979 	if (r)
980 		goto err;
981 
982 	if (fence)
983 		*fence = dma_fence_get(f);
984 	dma_fence_put(f);
985 
986 	return 0;
987 
988 err:
989 	amdgpu_job_free(job);
990 	return r;
991 }
992 
993 static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
994 					  struct amdgpu_ib *ib_msg,
995 					  struct dma_fence **fence)
996 {
997 	unsigned int ib_size_dw = 16;
998 	struct amdgpu_device *adev = ring->adev;
999 	struct amdgpu_job *job;
1000 	struct amdgpu_ib *ib;
1001 	struct dma_fence *f = NULL;
1002 	uint32_t *ib_checksum = NULL;
1003 	uint64_t addr;
1004 	int i, r;
1005 
1006 	if (adev->vcn.inst[ring->me].using_unified_queue)
1007 		ib_size_dw += 8;
1008 
1009 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
1010 				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
1011 				     &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
1012 	if (r)
1013 		return r;
1014 
1015 	ib = &job->ibs[0];
1016 	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
1017 
1018 	ib->length_dw = 0;
1019 
1020 	if (adev->vcn.inst[ring->me].using_unified_queue)
1021 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
1022 
1023 	ib->ptr[ib->length_dw++] = 0x00000018;
1024 	ib->ptr[ib->length_dw++] = 0x00000001;
1025 	ib->ptr[ib->length_dw++] = handle;
1026 	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1027 	ib->ptr[ib->length_dw++] = addr;
1028 	ib->ptr[ib->length_dw++] = 0x00000000;
1029 
1030 	ib->ptr[ib->length_dw++] = 0x00000014;
1031 	ib->ptr[ib->length_dw++] = 0x00000002;
1032 	ib->ptr[ib->length_dw++] = 0x0000001c;
1033 	ib->ptr[ib->length_dw++] = 0x00000000;
1034 	ib->ptr[ib->length_dw++] = 0x00000000;
1035 
1036 	ib->ptr[ib->length_dw++] = 0x00000008;
1037 	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
1038 
1039 	for (i = ib->length_dw; i < ib_size_dw; ++i)
1040 		ib->ptr[i] = 0x0;
1041 
1042 	if (adev->vcn.inst[ring->me].using_unified_queue)
1043 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
1044 
1045 	r = amdgpu_job_submit_direct(job, ring, &f);
1046 	if (r)
1047 		goto err;
1048 
1049 	if (fence)
1050 		*fence = dma_fence_get(f);
1051 	dma_fence_put(f);
1052 
1053 	return 0;
1054 
1055 err:
1056 	amdgpu_job_free(job);
1057 	return r;
1058 }
1059 
1060 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1061 {
1062 	struct amdgpu_device *adev = ring->adev;
1063 	struct dma_fence *fence = NULL;
1064 	struct amdgpu_ib ib;
1065 	long r;
1066 
1067 	memset(&ib, 0, sizeof(ib));
1068 	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
1069 			AMDGPU_IB_POOL_DIRECT,
1070 			&ib);
1071 	if (r)
1072 		return r;
1073 
1074 	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
1075 	if (r)
1076 		goto error;
1077 
1078 	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
1079 	if (r)
1080 		goto error;
1081 
1082 	r = dma_fence_wait_timeout(fence, false, timeout);
1083 	if (r == 0)
1084 		r = -ETIMEDOUT;
1085 	else if (r > 0)
1086 		r = 0;
1087 
1088 error:
1089 	amdgpu_ib_free(&ib, fence);
1090 	dma_fence_put(fence);
1091 
1092 	return r;
1093 }
1094 
1095 int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1096 {
1097 	struct amdgpu_device *adev = ring->adev;
1098 	long r;
1099 
1100 	if ((amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) &&
1101 	    (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 1)) &&
1102 	    (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 2))) {
1103 		r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
1104 		if (r)
1105 			goto error;
1106 	}
1107 
1108 	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
1109 
1110 error:
1111 	return r;
1112 }
1113 
1114 enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
1115 {
1116 	switch (ring) {
1117 	case 0:
1118 		return AMDGPU_RING_PRIO_0;
1119 	case 1:
1120 		return AMDGPU_RING_PRIO_1;
1121 	case 2:
1122 		return AMDGPU_RING_PRIO_2;
1123 	default:
1124 		return AMDGPU_RING_PRIO_0;
1125 	}
1126 }
1127 
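/**
 * amdgpu_vcn_setup_ucode - register VCN firmware with the PSP loader
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * When firmware is loaded through PSP, add the instance's VCN ucode to the
 * firmware list and account for its size.  Harvested instances, instances
 * beyond the two supported firmware slots, and secondary instances on IP
 * versions 4.0.3/5.0.1/5.0.2 (which register only instance 0) are skipped.
 */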
1128 void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i)
1129 {
1130 	unsigned int idx;
1131 
1132 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1133 		const struct common_firmware_header *hdr;
1134 
1135 		if (adev->vcn.harvest_config & (1 << i))
1136 			return;
1137 
1138 		if ((amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 3) ||
1139 		     amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(5, 0, 1) ||
1140 		     amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(5, 0, 2))
1141 		    && (i > 0))
1142 			return;
1143 
1144 		hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
1145 		/* currently only support 2 FW instances */
1146 		if (i >= 2) {
1147 			dev_info(adev->dev, "More than 2 VCN FW instances!\n");
1148 			return;
1149 		}
1150 		idx = AMDGPU_UCODE_ID_VCN + i;
1151 		adev->firmware.ucode[idx].ucode_id = idx;
1152 		adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
1153 		adev->firmware.fw_size +=
1154 			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
1155 	}
1156 }
1157 
1158 /*
1159  * debugfs for mapping vcn firmware log buffer.
1160  */
1161 #if defined(CONFIG_DEBUG_FS)
1162 static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
1163 					     size_t size, loff_t *pos)
1164 {
1165 	struct amdgpu_vcn_inst *vcn;
1166 	void *log_buf;
1167 	struct amdgpu_vcn_fwlog *plog;
1168 	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
1169 	unsigned int read_num[2] = {0};
1170 
1171 	vcn = file_inode(f)->i_private;
1172 	if (!vcn)
1173 		return -ENODEV;
1174 
1175 	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
1176 		return -EFAULT;
1177 
1178 	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1179 
1180 	plog = (struct amdgpu_vcn_fwlog *)log_buf;
1181 	read_pos = plog->rptr;
1182 	write_pos = plog->wptr;
1183 
1184 	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
1185 		return -EFAULT;
1186 
1187 	if (!size || (read_pos == write_pos))
1188 		return 0;
1189 
1190 	if (write_pos > read_pos) {
1191 		available = write_pos - read_pos;
1192 		read_num[0] = min_t(size_t, size, available);
1193 	} else {
1194 		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
1195 		available = read_num[0] + write_pos - plog->header_size;
1196 		if (size > available)
1197 			read_num[1] = write_pos - plog->header_size;
1198 		else if (size > read_num[0])
1199 			read_num[1] = size - read_num[0];
1200 		else
1201 			read_num[0] = size;
1202 	}
1203 
1204 	for (i = 0; i < 2; i++) {
1205 		if (read_num[i]) {
1206 			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
1207 				read_pos = plog->header_size;
1208 			if (read_num[i] == copy_to_user((buf + read_bytes),
1209 							(log_buf + read_pos), read_num[i]))
1210 				return -EFAULT;
1211 
1212 			read_bytes += read_num[i];
1213 			read_pos += read_num[i];
1214 		}
1215 	}
1216 
1217 	plog->rptr = read_pos;
1218 	*pos += read_bytes;
1219 	return read_bytes;
1220 }
1221 
1222 static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
1223 	.owner = THIS_MODULE,
1224 	.read = amdgpu_debugfs_vcn_fwlog_read,
1225 	.llseek = default_llseek
1226 };
1227 #endif
1228 
1229 void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
1230 				   struct amdgpu_vcn_inst *vcn)
1231 {
1232 #if defined(CONFIG_DEBUG_FS)
1233 	struct drm_minor *minor = adev_to_drm(adev)->primary;
1234 	struct dentry *root = minor->debugfs_root;
1235 	char name[32];
1236 
1237 	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
1238 	debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
1239 				 &amdgpu_debugfs_vcnfwlog_fops,
1240 				 AMDGPU_VCNFW_LOG_SIZE);
1241 #endif
1242 }
1243 
1244 void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
1245 {
1246 #if defined(CONFIG_DEBUG_FS)
1247 	uint32_t *flag = vcn->fw_shared.cpu_addr;
1248 	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1249 	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
1250 	struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
1251 	struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
1252 							 + vcn->fw_shared.log_offset;
1253 	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
1254 	fw_log->is_enabled = 1;
1255 	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
1256 	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
1257 	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
1258 
1259 	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
1260 	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
1261 	log_buf->rptr = log_buf->header_size;
1262 	log_buf->wptr = log_buf->header_size;
1263 	log_buf->wrapped = 0;
1264 #endif
1265 }
1266 
1267 int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
1268 				struct amdgpu_irq_src *source,
1269 				struct amdgpu_iv_entry *entry)
1270 {
1271 	struct ras_common_if *ras_if = adev->vcn.ras_if;
1272 	struct ras_dispatch_if ih_data = {
1273 		.entry = entry,
1274 	};
1275 
1276 	if (!ras_if)
1277 		return 0;
1278 
1279 	if (!amdgpu_sriov_vf(adev)) {
1280 		ih_data.head = *ras_if;
1281 		amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1282 	} else {
1283 		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
1284 			adev->virt.ops->ras_poison_handler(adev, ras_if->block);
1285 		else
1286 			dev_warn(adev->dev,
1287 				"No ras_poison_handler interface in SRIOV for VCN!\n");
1288 	}
1289 
1290 	return 0;
1291 }
1292 
1293 int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
1294 {
1295 	int r, i;
1296 
1297 	r = amdgpu_ras_block_late_init(adev, ras_block);
1298 	if (r)
1299 		return r;
1300 
1301 	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
1302 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1303 			if (adev->vcn.harvest_config & (1 << i) ||
1304 			    !adev->vcn.inst[i].ras_poison_irq.funcs)
1305 				continue;
1306 
1307 			r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
1308 			if (r)
1309 				goto late_fini;
1310 		}
1311 	}
1312 	return 0;
1313 
1314 late_fini:
1315 	amdgpu_ras_block_late_fini(adev, ras_block);
1316 	return r;
1317 }
1318 
1319 int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
1320 {
1321 	int err;
1322 	struct amdgpu_vcn_ras *ras;
1323 
1324 	if (!adev->vcn.ras)
1325 		return 0;
1326 
1327 	ras = adev->vcn.ras;
1328 	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
1329 	if (err) {
1330 		dev_err(adev->dev, "Failed to register vcn ras block!\n");
1331 		return err;
1332 	}
1333 
1334 	strcpy(ras->ras_block.ras_comm.name, "vcn");
1335 	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
1336 	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
1337 	adev->vcn.ras_if = &ras->ras_block.ras_comm;
1338 
1339 	if (!ras->ras_block.ras_late_init)
1340 		ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
1341 
1342 	return 0;
1343 }
1344 
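/**
 * amdgpu_vcn_psp_update_sram - load the DPG indirect SRAM through PSP
 * @adev: amdgpu_device pointer
 * @inst_idx: VCN instance index
 * @ucode_id: ucode ID to use, or 0 to pick the per-instance VCN RAM ID
 *
 * Build an amdgpu_firmware_info descriptor covering the register writes
 * accumulated in the DPG SRAM buffer and ask the PSP to load it.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */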
1345 int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
1346 			       enum AMDGPU_UCODE_ID ucode_id)
1347 {
1348 	struct amdgpu_firmware_info ucode = {
1349 		.ucode_id = (ucode_id ? ucode_id :
1350 			    (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
1351 					AMDGPU_UCODE_ID_VCN0_RAM)),
1352 		.mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
1353 		.ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
1354 			      (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
1355 	};
1356 
1357 	return psp_execute_ip_fw_load(&adev->psp, &ucode);
1358 }
1359 
1360 static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev,
1361 						struct device_attribute *attr,
1362 						char *buf)
1363 {
1364 	struct drm_device *ddev = dev_get_drvdata(dev);
1365 	struct amdgpu_device *adev = drm_to_adev(ddev);
1366 
1367 	if (!adev)
1368 		return -ENODEV;
1369 
1370 	return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset);
1371 }
1372 
1373 static DEVICE_ATTR(vcn_reset_mask, 0444,
1374 		   amdgpu_get_vcn_reset_mask, NULL);
1375 
1376 int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev)
1377 {
1378 	int r = 0;
1379 
1380 	if (adev->vcn.num_vcn_inst) {
1381 		r = device_create_file(adev->dev, &dev_attr_vcn_reset_mask);
1382 		if (r)
1383 			return r;
1384 	}
1385 
1386 	return r;
1387 }
1388 
1389 void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
1390 {
1391 	if (adev->dev->kobj.sd) {
1392 		if (adev->vcn.num_vcn_inst)
1393 			device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
1394 	}
1395 }
1396 
1397 /*
1398  * debugfs to enable/disable vcn job submission to specific core or
1399  * instance. It is created only if the queue type is unified.
1400  */
1401 #if defined(CONFIG_DEBUG_FS)
1402 static int amdgpu_debugfs_vcn_sched_mask_set(void *data, u64 val)
1403 {
1404 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
1405 	u32 i;
1406 	u64 mask;
1407 	struct amdgpu_ring *ring;
1408 
1409 	if (!adev)
1410 		return -ENODEV;
1411 
1412 	mask = (1ULL << adev->vcn.num_vcn_inst) - 1;
1413 	if ((val & mask) == 0)
1414 		return -EINVAL;
1415 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1416 		ring = &adev->vcn.inst[i].ring_enc[0];
1417 		if (val & (1ULL << i))
1418 			ring->sched.ready = true;
1419 		else
1420 			ring->sched.ready = false;
1421 	}
1422 	/* publish sched.ready flag update effective immediately across smp */
1423 	smp_rmb();
1424 	return 0;
1425 }
1426 
1427 static int amdgpu_debugfs_vcn_sched_mask_get(void *data, u64 *val)
1428 {
1429 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
1430 	u32 i;
1431 	u64 mask = 0;
1432 	struct amdgpu_ring *ring;
1433 
1434 	if (!adev)
1435 		return -ENODEV;
1436 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1437 		ring = &adev->vcn.inst[i].ring_enc[0];
1438 		if (ring->sched.ready)
1439 			mask |= 1ULL << i;
1440 	}
1441 	*val = mask;
1442 	return 0;
1443 }
1444 
1445 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_vcn_sched_mask_fops,
1446 			 amdgpu_debugfs_vcn_sched_mask_get,
1447 			 amdgpu_debugfs_vcn_sched_mask_set, "%llx\n");
1448 #endif
1449 
1450 void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
1451 {
1452 #if defined(CONFIG_DEBUG_FS)
1453 	struct drm_minor *minor = adev_to_drm(adev)->primary;
1454 	struct dentry *root = minor->debugfs_root;
1455 	char name[32];
1456 
1457 	if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.inst[0].using_unified_queue)
1458 		return;
1459 	sprintf(name, "amdgpu_vcn_sched_mask");
1460 	debugfs_create_file(name, 0600, root, adev,
1461 			    &amdgpu_debugfs_vcn_sched_mask_fops);
1462 #endif
1463 }
1464 
1465 /**
1466  * vcn_set_powergating_state - set VCN block powergating state
1467  *
1468  * @ip_block: amdgpu_ip_block pointer
1469  * @state: power gating state
1470  *
1471  * Set VCN block powergating state
1472  */
1473 int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
1474 			      enum amd_powergating_state state)
1475 {
1476 	struct amdgpu_device *adev = ip_block->adev;
1477 	int ret = 0, i;
1478 
1479 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1480 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
1481 
1482 		ret |= vinst->set_pg_state(vinst, state);
1483 	}
1484 
1485 	return ret;
1486 }
1487 
1488 /**
1489  * amdgpu_vcn_reset_engine - Reset a specific VCN engine
1490  * @adev: Pointer to the AMDGPU device
1491  * @instance_id: VCN engine instance to reset
1492  *
1493  * Returns: 0 on success, or a negative error code on failure.
1494  */
1495 static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev,
1496 				   uint32_t instance_id)
1497 {
1498 	struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id];
1499 	int r, i;
1500 
1501 	mutex_lock(&vinst->engine_reset_mutex);
1502 	/* Stop the scheduler's work queue for the dec and enc rings if they are running.
1503 	 * This ensures that no new tasks are submitted to the queues while
1504 	 * the reset is in progress.
1505 	 */
1506 	drm_sched_wqueue_stop(&vinst->ring_dec.sched);
1507 	for (i = 0; i < vinst->num_enc_rings; i++)
1508 		drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);
1509 
1510 	/* Perform the VCN reset for the specified instance */
1511 	r = vinst->reset(vinst);
1512 	if (r)
1513 		goto unlock;
1514 	r = amdgpu_ring_test_ring(&vinst->ring_dec);
1515 	if (r)
1516 		goto unlock;
1517 	for (i = 0; i < vinst->num_enc_rings; i++) {
1518 		r = amdgpu_ring_test_ring(&vinst->ring_enc[i]);
1519 		if (r)
1520 			goto unlock;
1521 	}
1522 	amdgpu_fence_driver_force_completion(&vinst->ring_dec);
1523 	for (i = 0; i < vinst->num_enc_rings; i++)
1524 		amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]);
1525 
1526 	/* Restart the scheduler's work queue for the dec and enc rings
1527 	 * if they were stopped by this function. This allows new tasks
1528 	 * to be submitted to the queues after the reset is complete.
1529 	 */
1530 	drm_sched_wqueue_start(&vinst->ring_dec.sched);
1531 	for (i = 0; i < vinst->num_enc_rings; i++)
1532 		drm_sched_wqueue_start(&vinst->ring_enc[i].sched);
1533 
1534 unlock:
1535 	mutex_unlock(&vinst->engine_reset_mutex);
1536 
1537 	return r;
1538 }
1539 
1540 /**
1541  * amdgpu_vcn_ring_reset - Reset a VCN ring
1542  * @ring: ring to reset
1543  * @vmid: vmid of guilty job
1544  * @timedout_fence: fence of timed out job
1545  *
1546  * This helper is for VCN blocks without unified queues because
1547  * resetting the engine resets all queues in that case.  With
1548  * unified queues we have one queue per engine.
1549  * Returns: 0 on success, or a negative error code on failure.
1550  */
1551 int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
1552 			  unsigned int vmid,
1553 			  struct amdgpu_fence *timedout_fence)
1554 {
1555 	struct amdgpu_device *adev = ring->adev;
1556 
1557 	if (adev->vcn.inst[ring->me].using_unified_queue)
1558 		return -EINVAL;
1559 
1560 	return amdgpu_vcn_reset_engine(adev, ring->me);
1561 }
1562 
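/**
 * amdgpu_vcn_reg_dump_init - allocate the register dump buffer
 * @adev: amdgpu_device pointer
 * @reg: register list to capture on dump
 * @count: number of entries in @reg
 *
 * Allocate one dump slot per register and per VCN instance and remember the
 * register list for amdgpu_vcn_dump_ip_state()/amdgpu_vcn_print_ip_state().
 *
 * Returns: 0 on success, or -ENOMEM on allocation failure.
 */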
1563 int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
1564 			     const struct amdgpu_hwip_reg_entry *reg, u32 count)
1565 {
1566 	adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
1567 				     sizeof(uint32_t), GFP_KERNEL);
1568 	if (!adev->vcn.ip_dump)
1569 		return -ENOMEM;
1570 	adev->vcn.reg_list = reg;
1571 	adev->vcn.reg_count = count;
1572 
1573 	return 0;
1574 }
1575 
1576 static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev)
1577 {
1578 	kfree(adev->vcn.ip_dump);
1579 	adev->vcn.ip_dump = NULL;
1580 	adev->vcn.reg_list = NULL;
1581 	adev->vcn.reg_count = 0;
1582 }
1583 
1584 void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
1585 {
1586 	struct amdgpu_device *adev = ip_block->adev;
1587 	int i, j;
1588 	bool is_powered;
1589 	u32 inst_off;
1590 
1591 	if (!adev->vcn.ip_dump)
1592 		return;
1593 
1594 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1595 		if (adev->vcn.harvest_config & (1 << i))
1596 			continue;
1597 
1598 		inst_off = i * adev->vcn.reg_count;
1599 		/* mmUVD_POWER_STATUS is always readable and is the first in reg_list */
1600 		adev->vcn.ip_dump[inst_off] =
1601 			RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i));
1602 		is_powered = (adev->vcn.ip_dump[inst_off] &
1603 			      UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
1604 			      UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
1605 
1606 		if (is_powered)
1607 			for (j = 1; j < adev->vcn.reg_count; j++)
1608 				adev->vcn.ip_dump[inst_off + j] =
1609 				RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
1610 	}
1611 }
1612 
1613 void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
1614 {
1615 	struct amdgpu_device *adev = ip_block->adev;
1616 	int i, j;
1617 	bool is_powered;
1618 	u32 inst_off;
1619 
1620 	if (!adev->vcn.ip_dump)
1621 		return;
1622 
1623 	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
1624 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1625 		if (adev->vcn.harvest_config & (1 << i)) {
1626 			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
1627 			continue;
1628 		}
1629 
1630 		inst_off = i * adev->vcn.reg_count;
1631 		is_powered = (adev->vcn.ip_dump[inst_off] &
1632 			      UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
1633 			      UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
1634 
1635 		if (is_powered) {
1636 			drm_printf(p, "\nActive Instance:VCN%d\n", i);
1637 			for (j = 0; j < adev->vcn.reg_count; j++)
1638 				drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name,
1639 					   adev->vcn.ip_dump[inst_off + j]);
1640 		} else {
1641 			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
1642 		}
1643 	}
1644 }
1645