/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
#define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP	"amdgpu/yellow_carp_vcn.bin"
#define FIRMWARE_VCN_3_1_2	"amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0	"amdgpu/vcn_4_0_0.bin"
#define FIRMWARE_VCN4_0_2	"amdgpu/vcn_4_0_2.bin"
#define FIRMWARE_VCN4_0_4	"amdgpu/vcn_4_0_4.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	unsigned int fw_shared_size, log_offset;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	switch (adev->ip_versions[UVD_HWIP][0]) {
	case IP_VERSION(1, 0, 0):
	case IP_VERSION(1, 0, 1):
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case IP_VERSION(2, 5, 0):
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 2, 0):
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			fw_name = FIRMWARE_RENOIR;
		else
			fw_name = FIRMWARE_GREEN_SARDINE;

		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 6, 0):
		fw_name = FIRMWARE_ALDEBARAN;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 0, 0):
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 0, 2):
		if (adev->asic_type == CHIP_NAVI12)
			fw_name = FIRMWARE_NAVI12;
		else
			fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 0, 64):
	case IP_VERSION(3, 0, 192):
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
			fw_name = FIRMWARE_SIENNA_CICHLID;
		else
			fw_name = FIRMWARE_NAVY_FLOUNDER;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 2):
		fw_name = FIRMWARE_VANGOGH;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 16):
		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 33):
		fw_name = FIRMWARE_BEIGE_GOBY;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 1, 1):
		fw_name = FIRMWARE_YELLOW_CARP;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 1, 2):
		fw_name = FIRMWARE_VCN_3_1_2;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(4, 0, 0):
		fw_name = FIRMWARE_VCN4_0_0;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(4, 0, 2):
		fw_name = FIRMWARE_VCN4_0_2;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(4, 0, 4):
		fw_name = FIRMWARE_VCN4_0_4;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the new
	 * naming convention. In the old naming convention this field overlaps
	 * the version minor and DRM_DISABLED_FLAG. Since the latest old-style
	 * version minor is 0x5B and DRM_DISABLED_FLAG is always zero there,
	 * this field reads as zero for old firmware, so these four bits tell
	 * which naming convention is in use.
	 */
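	/*
	 * Worked example with a hypothetical ucode_version of 0x21512034:
	 * fw_check = (0x21512034 >> 20) & 0xf = 5, non-zero, so the new
	 * convention applies and the value decodes as ENC 5.18
	 * (enc_minor = 0x12), DEC 1, VEP 2, Revision 52 (0x034).
	 */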
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
	} else {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
	}

	bo_size += fw_shared_size;

	if (amdgpu_vcnfw_log)
		bo_size += AMDGPU_VCNFW_LOG_SIZE;

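	/*
	 * Resulting per-instance VCPU BO layout (a sketch; the firmware image
	 * is only part of the BO when PSP front-door loading is not used):
	 *
	 *   [ fw image | stack | context | fw_shared | fwlog (optional) ]
	 *
	 * fw_shared and the optional log buffer are carved from the end of
	 * the BO in the loop below.
	 */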
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM |
					    AMDGPU_GEM_DOMAIN_GTT,
					    &adev->vcn.inst[i].vcpu_bo,
					    &adev->vcn.inst[i].gpu_addr,
					    &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

		adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
				bo_size - fw_shared_size;
		adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
				bo_size - fw_shared_size;

		adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;

		if (amdgpu_vcnfw_log) {
			adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
			adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
			adev->vcn.inst[i].fw_shared.log_offset = log_offset;
		}

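		/*
		 * The DPG SRAM BO backs the indirect register programming
		 * list; 64 * 2 * 4 bytes plausibly corresponds to 64
		 * (register, value) dword pairs, though the exact layout is
		 * not spelled out here (an assumption).
		 */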
		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT,
					&adev->vcn.inst[i].dpg_sram_bo,
					&adev->vcn.inst[i].dpg_sram_gpu_addr,
					&adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}
	}

	return 0;
}

int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
						  &adev->vcn.inst[j].dpg_sram_gpu_addr,
						  (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
					  &adev->vcn.inst[j].gpu_addr,
					  (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}

/* from VCN 4 and above, only the unified queue is used */
static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool ret = false;

	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
		ret = true;

	return ret;
}

bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
	bool ret = false;
	int vcn_config = adev->vcn.vcn_config[vcn_instance];

	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
		ret = true;
	} else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) {
		ret = true;
	} else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
		ret = true;
	}

	return ret;
}

int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return 0;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
			drm_dev_exit(idx);
		}
	}
	return 0;
}

int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
				drm_dev_exit(idx);
			}
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
					memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
						    le32_to_cpu(hdr->ucode_size_bytes));
					drm_dev_exit(idx);
				}
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}

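/*
 * Delayed-work handler that power-gates VCN once all emitted fences have
 * signaled and no submissions are in flight; with DPG support it also
 * pauses or unpauses firmware-based dynamic power gating per instance
 * before deciding whether to gate.
 */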
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;
	int r = 0;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j] ||
				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
		       AMD_PG_STATE_GATE);
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
				false);
		if (r)
			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

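/*
 * Ring begin_use/end_use hooks bracket every submission: begin_use bumps
 * total_submission_cnt so the idle worker cannot gate power while work is
 * pending, ungates the block, and requests the DPG pause state; end_use
 * drops the counters and re-arms the idle timer.
 */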
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r = 0;

	atomic_inc(&adev->vcn.total_submission_cnt);

	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
				true);
		if (r)
			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
	}

	mutex_lock(&adev->vcn.vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
	       AMD_PG_STATE_UNGATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
		ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

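/*
 * Decode ring smoke test: seed scratch9 with 0xCAFEDEAD, ask the ring to
 * overwrite it with 0xDEADBEEF, then poll the register until the write
 * lands or adev->usec_timeout microseconds elapse.
 */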
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

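/*
 * Build a minimal direct-submit IB that hands the message buffer address
 * to the firmware through the data0/data1 register pair and kicks it with
 * a cmd write; the remaining slots are padded with NOP register packets.
 */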
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib_msg,
				   struct dma_fence **fence)
{
	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     64, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(adev, ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(adev, ib_msg, f);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
		struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			AMDGPU_IB_POOL_DIRECT,
			ib);
	if (r)
		return r;

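	/*
	 * Firmware-defined "create session" message. The field meanings are
	 * not documented here; msg[4] carries the session handle, and the
	 * 0x00000780/0x00000440 pair plausibly encodes a 1920x1088 surface
	 * (an assumption based on the hex values).
	 */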
	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			AMDGPU_IB_POOL_DIRECT,
			ib);
	if (r)
		return r;

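	/* Matching "destroy session" message for the same handle in msg[4]. */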
	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

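/*
 * Unified-queue IBs are wrapped with two 4-dword headers: a checksum
 * header, whose checksum dword is filled in later by
 * amdgpu_vcn_unified_ring_ib_checksum() by summing the payload dwords
 * that follow, and an engine-info header selecting encode (0x2) or
 * decode (0x3).
 */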
static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
						uint32_t ib_pack_in_dw, bool enc)
{
	uint32_t *ib_checksum;

	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
	ib->ptr[ib->length_dw++] = 0x30000002;
	ib_checksum = &ib->ptr[ib->length_dw++];
	ib->ptr[ib->length_dw++] = ib_pack_in_dw;

	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
	ib->ptr[ib->length_dw++] = 0x30000001;
	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);

	return ib_checksum;
}

static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
						uint32_t ib_pack_in_dw)
{
	uint32_t i;
	uint32_t checksum = 0;

	for (i = 0; i < ib_pack_in_dw; i++)
		checksum += *(*ib_checksum + 2 + i);

	**ib_checksum = checksum;
}

static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib_msg,
				      struct dma_fence **fence)
{
	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
	unsigned int ib_size_dw = 64;
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	bool sq = amdgpu_vcn_using_unified_queue(ring);
	uint32_t *ib_checksum;
	uint32_t ib_pack_in_dw;
	int i, r;

	if (sq)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->length_dw = 0;

	/* single queue headers */
	if (sq) {
		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
						+ 4 + 2; /* engine info + decoding ib in dw */
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
	}

	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));

	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (sq)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(adev, ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(adev, ib_msg, f);
	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_ib *ib_msg,
					 struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	bool sq = amdgpu_vcn_using_unified_queue(ring);
	int i, r;

	if (sq)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (sq)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (sq)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib_msg,
					  struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	bool sq = amdgpu_vcn_using_unified_queue(ring);
	int i, r;

	if (sq)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (sq)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (sq)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
			AMDGPU_IB_POOL_DIRECT,
			&ib);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	amdgpu_ib_free(adev, &ib, fence);
	dma_fence_put(fence);

	return r;
}

int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	long r;

	r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);

error:
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
{
	int i;
	unsigned int idx;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			/* currently only support 2 FW instances */
			if (i >= 2) {
				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
				break;
			}
			idx = AMDGPU_UCODE_ID_VCN + i;
			adev->firmware.ucode[idx].ucode_id = idx;
			adev->firmware.ucode[idx].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
	}
}

/*
 * debugfs for mapping vcn firmware log buffer.
 */
#if defined(CONFIG_DEBUG_FS)
static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_vcn_inst *vcn;
	void *log_buf;
	volatile struct amdgpu_vcn_fwlog *plog;
	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
	unsigned int read_num[2] = {0};

	vcn = file_inode(f)->i_private;
	if (!vcn)
		return -ENODEV;

	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
		return -EFAULT;

	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;

	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
	read_pos = plog->rptr;
	write_pos = plog->wptr;

	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
		return -EFAULT;

	if (!size || (read_pos == write_pos))
		return 0;

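	/*
	 * The log is a ring buffer: when the write pointer has wrapped below
	 * the read pointer, the data is returned in two chunks, skipping the
	 * fwlog header at the start of the buffer.
	 */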
	if (write_pos > read_pos) {
		available = write_pos - read_pos;
		read_num[0] = min(size, (size_t)available);
	} else {
		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
		available = read_num[0] + write_pos - plog->header_size;
		if (size > available)
			read_num[1] = write_pos - plog->header_size;
		else if (size > read_num[0])
			read_num[1] = size - read_num[0];
		else
			read_num[0] = size;
	}

	for (i = 0; i < 2; i++) {
		if (read_num[i]) {
			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
				read_pos = plog->header_size;
			if (read_num[i] == copy_to_user((buf + read_bytes),
							(log_buf + read_pos), read_num[i]))
				return -EFAULT;

			read_bytes += read_num[i];
			read_pos += read_num[i];
		}
	}

	plog->rptr = read_pos;
	*pos += read_bytes;
	return read_bytes;
}

static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_vcn_fwlog_read,
	.llseek = default_llseek
};
#endif

void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
				   struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, vcn,
				 &amdgpu_debugfs_vcnfwlog_fops,
				 AMDGPU_VCNFW_LOG_SIZE);
#endif
}

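/*
 * Advertise the log buffer to the firmware through the shared memory
 * area: set the logging flag and publish the buffer's GPU address and
 * size, then initialize the ring-buffer header the firmware will use.
 */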
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
							+ vcn->fw_shared.log_offset;

	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
	fw_log->is_enabled = 1;
	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);

	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
	log_buf->rptr = log_buf->header_size;
	log_buf->wptr = log_buf->header_size;
	log_buf->wrapped = 0;
#endif
}

int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->vcn.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	if (!amdgpu_sriov_vf(adev)) {
		ih_data.head = *ras_if;
		amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	} else {
		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
			adev->virt.ops->ras_poison_handler(adev);
		else
			dev_warn(adev->dev,
				"No ras_poison_handler interface in SRIOV for VCN!\n");
	}

	return 0;
}

void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev)
{
	if (!adev->vcn.ras)
		return;

	amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);

	strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
	adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
	adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
	adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;

	/* If the block does not define its own ras_late_init function, use the default */
	if (!adev->vcn.ras->ras_block.ras_late_init)
		adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
}
1289