1 /*
2  * Copyright 2016-2024 Advanced Micro Devices, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 
27 #include <linux/firmware.h>
28 #include <linux/module.h>
29 #include <linux/dmi.h>
30 #include <linux/pci.h>
31 #include <linux/debugfs.h>
32 #include <drm/drm_drv.h>
33 
34 #include "amdgpu.h"
35 #include "amdgpu_pm.h"
36 #include "amdgpu_vcn.h"
37 #include "soc15d.h"
38 
39 /* Firmware Names */
40 #define FIRMWARE_RAVEN			"amdgpu/raven_vcn.bin"
41 #define FIRMWARE_PICASSO		"amdgpu/picasso_vcn.bin"
42 #define FIRMWARE_RAVEN2			"amdgpu/raven2_vcn.bin"
43 #define FIRMWARE_ARCTURUS		"amdgpu/arcturus_vcn.bin"
44 #define FIRMWARE_RENOIR			"amdgpu/renoir_vcn.bin"
45 #define FIRMWARE_GREEN_SARDINE		"amdgpu/green_sardine_vcn.bin"
46 #define FIRMWARE_NAVI10			"amdgpu/navi10_vcn.bin"
47 #define FIRMWARE_NAVI14			"amdgpu/navi14_vcn.bin"
48 #define FIRMWARE_NAVI12			"amdgpu/navi12_vcn.bin"
49 #define FIRMWARE_SIENNA_CICHLID		"amdgpu/sienna_cichlid_vcn.bin"
50 #define FIRMWARE_NAVY_FLOUNDER		"amdgpu/navy_flounder_vcn.bin"
51 #define FIRMWARE_VANGOGH		"amdgpu/vangogh_vcn.bin"
52 #define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
53 #define FIRMWARE_ALDEBARAN		"amdgpu/aldebaran_vcn.bin"
54 #define FIRMWARE_BEIGE_GOBY		"amdgpu/beige_goby_vcn.bin"
55 #define FIRMWARE_YELLOW_CARP		"amdgpu/yellow_carp_vcn.bin"
56 #define FIRMWARE_VCN_3_1_2		"amdgpu/vcn_3_1_2.bin"
57 #define FIRMWARE_VCN4_0_0		"amdgpu/vcn_4_0_0.bin"
58 #define FIRMWARE_VCN4_0_2		"amdgpu/vcn_4_0_2.bin"
59 #define FIRMWARE_VCN4_0_3		"amdgpu/vcn_4_0_3.bin"
60 #define FIRMWARE_VCN4_0_4		"amdgpu/vcn_4_0_4.bin"
61 #define FIRMWARE_VCN4_0_5		"amdgpu/vcn_4_0_5.bin"
62 #define FIRMWARE_VCN4_0_6		"amdgpu/vcn_4_0_6.bin"
63 #define FIRMWARE_VCN4_0_6_1		"amdgpu/vcn_4_0_6_1.bin"
64 #define FIRMWARE_VCN5_0_0		"amdgpu/vcn_5_0_0.bin"
65 #define FIRMWARE_VCN5_0_1		"amdgpu/vcn_5_0_1.bin"
66 
67 MODULE_FIRMWARE(FIRMWARE_RAVEN);
68 MODULE_FIRMWARE(FIRMWARE_PICASSO);
69 MODULE_FIRMWARE(FIRMWARE_RAVEN2);
70 MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
71 MODULE_FIRMWARE(FIRMWARE_RENOIR);
72 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
73 MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
74 MODULE_FIRMWARE(FIRMWARE_NAVI10);
75 MODULE_FIRMWARE(FIRMWARE_NAVI14);
76 MODULE_FIRMWARE(FIRMWARE_NAVI12);
77 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
78 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
79 MODULE_FIRMWARE(FIRMWARE_VANGOGH);
80 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
81 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
82 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
83 MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
84 MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
85 MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
86 MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
87 MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
88 MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
89 MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
90 MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
91 MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
92 MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
93 
94 static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
95 
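/**
 * amdgpu_vcn_early_init - request VCN firmware for every instance
 * @adev: amdgpu device pointer
 *
 * Decodes the ucode prefix from the UVD/VCN IP version and requests the
 * matching firmware image for each VCN instance.  Instance 1 of VCN 4.0.6
 * uses an instance-suffixed binary; all other cases share one image name
 * per IP version.  Returns 0 on success or a negative error code.
 */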
96 int amdgpu_vcn_early_init(struct amdgpu_device *adev)
97 {
98 	char ucode_prefix[25];
99 	int r, i;
100 
101 	amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
102 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
103 		if (i == 1 && amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
104 			r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
105 						 AMDGPU_UCODE_REQUIRED,
106 						 "amdgpu/%s_%d.bin", ucode_prefix, i);
107 		else
108 			r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
109 						 AMDGPU_UCODE_REQUIRED,
110 						 "amdgpu/%s.bin", ucode_prefix);
111 		if (r) {
112 			amdgpu_ucode_release(&adev->vcn.inst[i].fw);
113 			return r;
114 		}
115 	}
116 	return 0;
117 }
118 
119 int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
120 {
121 	unsigned long bo_size;
122 	const struct common_firmware_header *hdr;
123 	unsigned char fw_check;
124 	unsigned int fw_shared_size, log_offset;
125 	int i, r;
126 
127 	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
128 	mutex_init(&adev->vcn.vcn_pg_lock);
129 	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
130 	atomic_set(&adev->vcn.total_submission_cnt, 0);
131 	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
132 		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
133 
134 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
135 	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
136 		adev->vcn.indirect_sram = true;
137 
138 	/*
139 	 * Some Steam Deck BIOS versions are incompatible with the
140 	 * indirect SRAM mode, leading to amdgpu being unable to get
141 	 * properly probed (and even potentially crashing the kernel).
142 	 * Hence, check for these versions here - notice this is
143 	 * restricted to Vangogh (Deck's APU).
144 	 */
145 	if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {
146 		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
147 
148 		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
149 		     !strncmp("F7A0114", bios_ver, 7))) {
150 			adev->vcn.indirect_sram = false;
151 			dev_info(adev->dev,
152 				"Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
153 		}
154 	}
155 
156 	/* from vcn4 and above, only unified queue is used */
157 	adev->vcn.using_unified_queue =
158 		amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
159 
160 	hdr = (const struct common_firmware_header *)adev->vcn.inst[0].fw->data;
161 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
162 
163 	/* Bits 20-23 hold the encode major version and are non-zero in the new
164 	 * naming convention. In the old convention this field is part of the
165 	 * version minor and DRM_DISABLED_FLAG. Since the latest version minor is
166 	 * 0x5B and DRM_DISABLED_FLAG is zero there, this field has always been
167 	 * zero so far. These four bits tell which naming convention is in use.
168 	 */
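	/*
	 * Illustrative decode (made-up value, not from any released firmware):
	 * ucode_version 0x1540902A in the new convention splits into VEP 1
	 * (bits 28-31), DEC 5 (bits 24-27), ENC major 4 (bits 20-23),
	 * ENC minor 0x09 (bits 12-19) and revision 0x02A (bits 0-11).
	 */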
169 	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
170 	if (fw_check) {
171 		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
172 
173 		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
174 		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
175 		enc_major = fw_check;
176 		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
177 		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
178 		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
179 			enc_major, enc_minor, dec_ver, vep, fw_rev);
180 	} else {
181 		unsigned int version_major, version_minor, family_id;
182 
183 		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
184 		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
185 		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
186 		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
187 			version_major, version_minor, family_id);
188 	}
189 
190 	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
191 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
192 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
193 
194 	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(5, 0, 0)) {
195 		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared));
196 		log_offset = offsetof(struct amdgpu_vcn5_fw_shared, fw_log);
197 	} else if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
198 		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
199 		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
200 	} else {
201 		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
202 		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
203 	}
204 
205 	bo_size += fw_shared_size;
206 
207 	if (amdgpu_vcnfw_log)
208 		bo_size += AMDGPU_VCNFW_LOG_SIZE;
209 
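	/*
	 * Per-instance VCPU BO layout (sizes accumulated above): the firmware
	 * image (non-PSP load only) plus stack and context memory occupy the
	 * front of the buffer, the fw_shared region sits near the tail, and
	 * the optional firmware log takes the last AMDGPU_VCNFW_LOG_SIZE bytes
	 * when amdgpu_vcnfw_log is enabled.
	 */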
210 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
211 		if (adev->vcn.harvest_config & (1 << i))
212 			continue;
213 
214 		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
215 					    AMDGPU_GEM_DOMAIN_VRAM |
216 					    AMDGPU_GEM_DOMAIN_GTT,
217 					    &adev->vcn.inst[i].vcpu_bo,
218 					    &adev->vcn.inst[i].gpu_addr,
219 					    &adev->vcn.inst[i].cpu_addr);
220 		if (r) {
221 			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
222 			return r;
223 		}
224 
225 		adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
226 				bo_size - fw_shared_size;
227 		adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
228 				bo_size - fw_shared_size;
229 
230 		adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
231 
232 		if (amdgpu_vcnfw_log) {
233 			adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
234 			adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
235 			adev->vcn.inst[i].fw_shared.log_offset = log_offset;
236 		}
237 
238 		if (adev->vcn.indirect_sram) {
239 			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
240 					AMDGPU_GEM_DOMAIN_VRAM |
241 					AMDGPU_GEM_DOMAIN_GTT,
242 					&adev->vcn.inst[i].dpg_sram_bo,
243 					&adev->vcn.inst[i].dpg_sram_gpu_addr,
244 					&adev->vcn.inst[i].dpg_sram_cpu_addr);
245 			if (r) {
246 				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
247 				return r;
248 			}
249 		}
250 	}
251 
252 	return 0;
253 }
254 
255 int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
256 {
257 	int i, j;
258 
259 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
260 		if (adev->vcn.harvest_config & (1 << j))
261 			continue;
262 
263 		amdgpu_bo_free_kernel(
264 			&adev->vcn.inst[j].dpg_sram_bo,
265 			&adev->vcn.inst[j].dpg_sram_gpu_addr,
266 			(void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
267 
268 		kvfree(adev->vcn.inst[j].saved_bo);
269 
270 		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
271 					  &adev->vcn.inst[j].gpu_addr,
272 					  (void **)&adev->vcn.inst[j].cpu_addr);
273 
274 		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
275 
276 		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
277 			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
278 
279 		amdgpu_ucode_release(&adev->vcn.inst[j].fw);
280 	}
281 
282 	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
283 	mutex_destroy(&adev->vcn.vcn_pg_lock);
284 
285 	return 0;
286 }
287 
288 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
289 {
290 	bool ret = false;
291 	int vcn_config = adev->vcn.inst[vcn_instance].vcn_config;
292 
293 	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
294 		ret = true;
295 	else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
296 		ret = true;
297 	else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
298 		ret = true;
299 
300 	return ret;
301 }
302 
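/*
 * Copy the contents of each instance's VCPU BO into a kvmalloc'ed shadow
 * (saved_bo) so that amdgpu_vcn_resume() can restore it once the original
 * buffer contents are lost, e.g. across suspend.
 */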
303 int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
304 {
305 	unsigned int size;
306 	void *ptr;
307 	int i, idx;
308 
309 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
310 		if (adev->vcn.harvest_config & (1 << i))
311 			continue;
312 		if (adev->vcn.inst[i].vcpu_bo == NULL)
313 			return 0;
314 
315 		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
316 		ptr = adev->vcn.inst[i].cpu_addr;
317 
318 		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
319 		if (!adev->vcn.inst[i].saved_bo)
320 			return -ENOMEM;
321 
322 		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
323 			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
324 			drm_dev_exit(idx);
325 		}
326 	}
327 
328 	return 0;
329 }
330 
331 int amdgpu_vcn_suspend(struct amdgpu_device *adev)
332 {
333 	bool in_ras_intr = amdgpu_ras_intr_triggered();
334 
335 	cancel_delayed_work_sync(&adev->vcn.idle_work);
336 
337 	/* err_event_athub will corrupt VCPU buffer, so we need to
338 	 * restore fw data and clear buffer in amdgpu_vcn_resume() */
339 	if (in_ras_intr)
340 		return 0;
341 
342 	return amdgpu_vcn_save_vcpu_bo(adev);
343 }
344 
345 int amdgpu_vcn_resume(struct amdgpu_device *adev)
346 {
347 	unsigned int size;
348 	void *ptr;
349 	int i, idx;
350 
351 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
352 		if (adev->vcn.harvest_config & (1 << i))
353 			continue;
354 		if (adev->vcn.inst[i].vcpu_bo == NULL)
355 			return -EINVAL;
356 
357 		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
358 		ptr = adev->vcn.inst[i].cpu_addr;
359 
360 		if (adev->vcn.inst[i].saved_bo != NULL) {
361 			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
362 				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
363 				drm_dev_exit(idx);
364 			}
365 			kvfree(adev->vcn.inst[i].saved_bo);
366 			adev->vcn.inst[i].saved_bo = NULL;
367 		} else {
368 			const struct common_firmware_header *hdr;
369 			unsigned int offset;
370 
371 			hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
372 			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
373 				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
374 				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
375 					memcpy_toio(adev->vcn.inst[i].cpu_addr,
376 						    adev->vcn.inst[i].fw->data + offset,
377 						    le32_to_cpu(hdr->ucode_size_bytes));
378 					drm_dev_exit(idx);
379 				}
380 				size -= le32_to_cpu(hdr->ucode_size_bytes);
381 				ptr += le32_to_cpu(hdr->ucode_size_bytes);
382 			}
383 			memset_io(ptr, 0, size);
384 		}
385 	}
386 	return 0;
387 }
388 
389 static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
390 {
391 	struct amdgpu_device *adev =
392 		container_of(work, struct amdgpu_device, vcn.idle_work.work);
393 	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
394 	unsigned int i, j;
395 	int r = 0;
396 
397 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
398 		if (adev->vcn.harvest_config & (1 << j))
399 			continue;
400 
401 		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
402 			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
403 
404 		/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
405 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
406 		    !adev->vcn.using_unified_queue) {
407 			struct dpg_pause_state new_state;
408 
409 			if (fence[j] ||
410 				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
411 				new_state.fw_based = VCN_DPG_STATE__PAUSE;
412 			else
413 				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
414 
415 			adev->vcn.pause_dpg_mode(adev, j, &new_state);
416 		}
417 
418 		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
419 		fences += fence[j];
420 	}
421 
422 	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
423 		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
424 		       AMD_PG_STATE_GATE);
425 		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
426 				false);
427 		if (r)
428 			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
429 	} else {
430 		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
431 	}
432 }
433 
434 void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
435 {
436 	struct amdgpu_device *adev = ring->adev;
437 	int r = 0;
438 
439 	atomic_inc(&adev->vcn.total_submission_cnt);
440 
441 	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
442 		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
443 				true);
444 		if (r)
445 			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
446 	}
447 
448 	mutex_lock(&adev->vcn.vcn_pg_lock);
449 	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
450 	       AMD_PG_STATE_UNGATE);
451 
452 	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
453 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
454 	    !adev->vcn.using_unified_queue) {
455 		struct dpg_pause_state new_state;
456 
457 		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
458 			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
459 			new_state.fw_based = VCN_DPG_STATE__PAUSE;
460 		} else {
461 			unsigned int fences = 0;
462 			unsigned int i;
463 
464 			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
465 				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
466 
467 			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
468 				new_state.fw_based = VCN_DPG_STATE__PAUSE;
469 			else
470 				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
471 		}
472 
473 		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
474 	}
475 	mutex_unlock(&adev->vcn.vcn_pg_lock);
476 }
477 
478 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
479 {
480 	struct amdgpu_device *adev = ring->adev;
481 
482 	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
483 	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
484 	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
485 	    !adev->vcn.using_unified_queue)
486 		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
487 
488 	atomic_dec(&ring->adev->vcn.total_submission_cnt);
489 
490 	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
491 }
492 
493 int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
494 {
495 	struct amdgpu_device *adev = ring->adev;
496 	uint32_t tmp = 0;
497 	unsigned int i;
498 	int r;
499 
500 	/* VCN in SRIOV does not support direct register read/write */
501 	if (amdgpu_sriov_vf(adev))
502 		return 0;
503 
504 	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
505 	r = amdgpu_ring_alloc(ring, 3);
506 	if (r)
507 		return r;
508 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
509 	amdgpu_ring_write(ring, 0xDEADBEEF);
510 	amdgpu_ring_commit(ring);
511 	for (i = 0; i < adev->usec_timeout; i++) {
512 		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
513 		if (tmp == 0xDEADBEEF)
514 			break;
515 		udelay(1);
516 	}
517 
518 	if (i >= adev->usec_timeout)
519 		r = -ETIMEDOUT;
520 
521 	return r;
522 }
523 
524 int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
525 {
526 	struct amdgpu_device *adev = ring->adev;
527 	uint32_t rptr;
528 	unsigned int i;
529 	int r;
530 
531 	if (amdgpu_sriov_vf(adev))
532 		return 0;
533 
534 	r = amdgpu_ring_alloc(ring, 16);
535 	if (r)
536 		return r;
537 
538 	rptr = amdgpu_ring_get_rptr(ring);
539 
540 	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
541 	amdgpu_ring_commit(ring);
542 
543 	for (i = 0; i < adev->usec_timeout; i++) {
544 		if (amdgpu_ring_get_rptr(ring) != rptr)
545 			break;
546 		udelay(1);
547 	}
548 
549 	if (i >= adev->usec_timeout)
550 		r = -ETIMEDOUT;
551 
552 	return r;
553 }
554 
555 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
556 				   struct amdgpu_ib *ib_msg,
557 				   struct dma_fence **fence)
558 {
559 	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
560 	struct amdgpu_device *adev = ring->adev;
561 	struct dma_fence *f = NULL;
562 	struct amdgpu_job *job;
563 	struct amdgpu_ib *ib;
564 	int i, r;
565 
566 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
567 				     64, AMDGPU_IB_POOL_DIRECT,
568 				     &job);
569 	if (r)
570 		goto err;
571 
572 	ib = &job->ibs[0];
573 	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
574 	ib->ptr[1] = addr;
575 	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
576 	ib->ptr[3] = addr >> 32;
577 	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
578 	ib->ptr[5] = 0;
579 	for (i = 6; i < 16; i += 2) {
580 		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
581 		ib->ptr[i+1] = 0;
582 	}
583 	ib->length_dw = 16;
584 
585 	r = amdgpu_job_submit_direct(job, ring, &f);
586 	if (r)
587 		goto err_free;
588 
589 	amdgpu_ib_free(ib_msg, f);
590 
591 	if (fence)
592 		*fence = dma_fence_get(f);
593 	dma_fence_put(f);
594 
595 	return 0;
596 
597 err_free:
598 	amdgpu_job_free(job);
599 err:
600 	amdgpu_ib_free(ib_msg, f);
601 	return r;
602 }
603 
604 static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
605 		struct amdgpu_ib *ib)
606 {
607 	struct amdgpu_device *adev = ring->adev;
608 	uint32_t *msg;
609 	int r, i;
610 
611 	memset(ib, 0, sizeof(*ib));
612 	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
613 			AMDGPU_IB_POOL_DIRECT,
614 			ib);
615 	if (r)
616 		return r;
617 
618 	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
619 	msg[0] = cpu_to_le32(0x00000028);
620 	msg[1] = cpu_to_le32(0x00000038);
621 	msg[2] = cpu_to_le32(0x00000001);
622 	msg[3] = cpu_to_le32(0x00000000);
623 	msg[4] = cpu_to_le32(handle);
624 	msg[5] = cpu_to_le32(0x00000000);
625 	msg[6] = cpu_to_le32(0x00000001);
626 	msg[7] = cpu_to_le32(0x00000028);
627 	msg[8] = cpu_to_le32(0x00000010);
628 	msg[9] = cpu_to_le32(0x00000000);
629 	msg[10] = cpu_to_le32(0x00000007);
630 	msg[11] = cpu_to_le32(0x00000000);
631 	msg[12] = cpu_to_le32(0x00000780);
632 	msg[13] = cpu_to_le32(0x00000440);
633 	for (i = 14; i < 1024; ++i)
634 		msg[i] = cpu_to_le32(0x0);
635 
636 	return 0;
637 }
638 
639 static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
640 					  struct amdgpu_ib *ib)
641 {
642 	struct amdgpu_device *adev = ring->adev;
643 	uint32_t *msg;
644 	int r, i;
645 
646 	memset(ib, 0, sizeof(*ib));
647 	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
648 			AMDGPU_IB_POOL_DIRECT,
649 			ib);
650 	if (r)
651 		return r;
652 
653 	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
654 	msg[0] = cpu_to_le32(0x00000028);
655 	msg[1] = cpu_to_le32(0x00000018);
656 	msg[2] = cpu_to_le32(0x00000000);
657 	msg[3] = cpu_to_le32(0x00000002);
658 	msg[4] = cpu_to_le32(handle);
659 	msg[5] = cpu_to_le32(0x00000000);
660 	for (i = 6; i < 1024; ++i)
661 		msg[i] = cpu_to_le32(0x0);
662 
663 	return 0;
664 }
665 
666 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
667 {
668 	struct dma_fence *fence = NULL;
669 	struct amdgpu_ib ib;
670 	long r;
671 
672 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
673 	if (r)
674 		goto error;
675 
676 	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
677 	if (r)
678 		goto error;
679 	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
680 	if (r)
681 		goto error;
682 
683 	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
684 	if (r)
685 		goto error;
686 
687 	r = dma_fence_wait_timeout(fence, false, timeout);
688 	if (r == 0)
689 		r = -ETIMEDOUT;
690 	else if (r > 0)
691 		r = 0;
692 
693 	dma_fence_put(fence);
694 error:
695 	return r;
696 }
697 
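/*
 * On the unified queue every IB is prefixed with two 4-dword packets: a
 * checksum packet (0x30000002) and an engine-info packet (0x30000001) that
 * selects encode or decode and carries the packed IB size.  The header helper
 * returns a pointer to the checksum dword, which is filled in by
 * amdgpu_vcn_unified_ring_ib_checksum() once the payload is complete.
 */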
698 static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
699 						uint32_t ib_pack_in_dw, bool enc)
700 {
701 	uint32_t *ib_checksum;
702 
703 	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
704 	ib->ptr[ib->length_dw++] = 0x30000002;
705 	ib_checksum = &ib->ptr[ib->length_dw++];
706 	ib->ptr[ib->length_dw++] = ib_pack_in_dw;
707 
708 	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
709 	ib->ptr[ib->length_dw++] = 0x30000001;
710 	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
711 	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
712 
713 	return ib_checksum;
714 }
715 
716 static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
717 						uint32_t ib_pack_in_dw)
718 {
719 	uint32_t i;
720 	uint32_t checksum = 0;
721 
722 	for (i = 0; i < ib_pack_in_dw; i++)
723 		checksum += *(*ib_checksum + 2 + i);
724 
725 	**ib_checksum = checksum;
726 }
727 
728 static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
729 				      struct amdgpu_ib *ib_msg,
730 				      struct dma_fence **fence)
731 {
732 	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
733 	unsigned int ib_size_dw = 64;
734 	struct amdgpu_device *adev = ring->adev;
735 	struct dma_fence *f = NULL;
736 	struct amdgpu_job *job;
737 	struct amdgpu_ib *ib;
738 	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
739 	uint32_t *ib_checksum;
740 	uint32_t ib_pack_in_dw;
741 	int i, r;
742 
743 	if (adev->vcn.using_unified_queue)
744 		ib_size_dw += 8;
745 
746 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
747 				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
748 				     &job);
749 	if (r)
750 		goto err;
751 
752 	ib = &job->ibs[0];
753 	ib->length_dw = 0;
754 
755 	/* single queue headers */
756 	if (adev->vcn.using_unified_queue) {
757 		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
758 						+ 4 + 2; /* engine info + decoding ib in dw */
759 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
760 	}
761 
762 	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
763 	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
764 	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
765 	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
766 	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
767 
768 	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
769 	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
770 	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
771 
772 	for (i = ib->length_dw; i < ib_size_dw; ++i)
773 		ib->ptr[i] = 0x0;
774 
775 	if (adev->vcn.using_unified_queue)
776 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
777 
778 	r = amdgpu_job_submit_direct(job, ring, &f);
779 	if (r)
780 		goto err_free;
781 
782 	amdgpu_ib_free(ib_msg, f);
783 
784 	if (fence)
785 		*fence = dma_fence_get(f);
786 	dma_fence_put(f);
787 
788 	return 0;
789 
790 err_free:
791 	amdgpu_job_free(job);
792 err:
793 	amdgpu_ib_free(ib_msg, f);
794 	return r;
795 }
796 
797 int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
798 {
799 	struct dma_fence *fence = NULL;
800 	struct amdgpu_ib ib;
801 	long r;
802 
803 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
804 	if (r)
805 		goto error;
806 
807 	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
808 	if (r)
809 		goto error;
810 	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
811 	if (r)
812 		goto error;
813 
814 	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
815 	if (r)
816 		goto error;
817 
818 	r = dma_fence_wait_timeout(fence, false, timeout);
819 	if (r == 0)
820 		r = -ETIMEDOUT;
821 	else if (r > 0)
822 		r = 0;
823 
824 	dma_fence_put(fence);
825 error:
826 	return r;
827 }
828 
829 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
830 {
831 	struct amdgpu_device *adev = ring->adev;
832 	uint32_t rptr;
833 	unsigned int i;
834 	int r;
835 
836 	if (amdgpu_sriov_vf(adev))
837 		return 0;
838 
839 	r = amdgpu_ring_alloc(ring, 16);
840 	if (r)
841 		return r;
842 
843 	rptr = amdgpu_ring_get_rptr(ring);
844 
845 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
846 	amdgpu_ring_commit(ring);
847 
848 	for (i = 0; i < adev->usec_timeout; i++) {
849 		if (amdgpu_ring_get_rptr(ring) != rptr)
850 			break;
851 		udelay(1);
852 	}
853 
854 	if (i >= adev->usec_timeout)
855 		r = -ETIMEDOUT;
856 
857 	return r;
858 }
859 
860 static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
861 					 struct amdgpu_ib *ib_msg,
862 					 struct dma_fence **fence)
863 {
864 	unsigned int ib_size_dw = 16;
865 	struct amdgpu_device *adev = ring->adev;
866 	struct amdgpu_job *job;
867 	struct amdgpu_ib *ib;
868 	struct dma_fence *f = NULL;
869 	uint32_t *ib_checksum = NULL;
870 	uint64_t addr;
871 	int i, r;
872 
873 	if (adev->vcn.using_unified_queue)
874 		ib_size_dw += 8;
875 
876 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
877 				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
878 				     &job);
879 	if (r)
880 		return r;
881 
882 	ib = &job->ibs[0];
883 	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
884 
885 	ib->length_dw = 0;
886 
887 	if (adev->vcn.using_unified_queue)
888 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
889 
890 	ib->ptr[ib->length_dw++] = 0x00000018;
891 	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
892 	ib->ptr[ib->length_dw++] = handle;
893 	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
894 	ib->ptr[ib->length_dw++] = addr;
895 	ib->ptr[ib->length_dw++] = 0x00000000;
896 
897 	ib->ptr[ib->length_dw++] = 0x00000014;
898 	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
899 	ib->ptr[ib->length_dw++] = 0x0000001c;
900 	ib->ptr[ib->length_dw++] = 0x00000000;
901 	ib->ptr[ib->length_dw++] = 0x00000000;
902 
903 	ib->ptr[ib->length_dw++] = 0x00000008;
904 	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
905 
906 	for (i = ib->length_dw; i < ib_size_dw; ++i)
907 		ib->ptr[i] = 0x0;
908 
909 	if (adev->vcn.using_unified_queue)
910 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
911 
912 	r = amdgpu_job_submit_direct(job, ring, &f);
913 	if (r)
914 		goto err;
915 
916 	if (fence)
917 		*fence = dma_fence_get(f);
918 	dma_fence_put(f);
919 
920 	return 0;
921 
922 err:
923 	amdgpu_job_free(job);
924 	return r;
925 }
926 
927 static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
928 					  struct amdgpu_ib *ib_msg,
929 					  struct dma_fence **fence)
930 {
931 	unsigned int ib_size_dw = 16;
932 	struct amdgpu_device *adev = ring->adev;
933 	struct amdgpu_job *job;
934 	struct amdgpu_ib *ib;
935 	struct dma_fence *f = NULL;
936 	uint32_t *ib_checksum = NULL;
937 	uint64_t addr;
938 	int i, r;
939 
940 	if (adev->vcn.using_unified_queue)
941 		ib_size_dw += 8;
942 
943 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
944 				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
945 				     &job);
946 	if (r)
947 		return r;
948 
949 	ib = &job->ibs[0];
950 	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
951 
952 	ib->length_dw = 0;
953 
954 	if (adev->vcn.using_unified_queue)
955 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
956 
957 	ib->ptr[ib->length_dw++] = 0x00000018;
958 	ib->ptr[ib->length_dw++] = 0x00000001;
959 	ib->ptr[ib->length_dw++] = handle;
960 	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
961 	ib->ptr[ib->length_dw++] = addr;
962 	ib->ptr[ib->length_dw++] = 0x00000000;
963 
964 	ib->ptr[ib->length_dw++] = 0x00000014;
965 	ib->ptr[ib->length_dw++] = 0x00000002;
966 	ib->ptr[ib->length_dw++] = 0x0000001c;
967 	ib->ptr[ib->length_dw++] = 0x00000000;
968 	ib->ptr[ib->length_dw++] = 0x00000000;
969 
970 	ib->ptr[ib->length_dw++] = 0x00000008;
971 	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
972 
973 	for (i = ib->length_dw; i < ib_size_dw; ++i)
974 		ib->ptr[i] = 0x0;
975 
976 	if (adev->vcn.using_unified_queue)
977 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
978 
979 	r = amdgpu_job_submit_direct(job, ring, &f);
980 	if (r)
981 		goto err;
982 
983 	if (fence)
984 		*fence = dma_fence_get(f);
985 	dma_fence_put(f);
986 
987 	return 0;
988 
989 err:
990 	amdgpu_job_free(job);
991 	return r;
992 }
993 
994 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
995 {
996 	struct amdgpu_device *adev = ring->adev;
997 	struct dma_fence *fence = NULL;
998 	struct amdgpu_ib ib;
999 	long r;
1000 
1001 	memset(&ib, 0, sizeof(ib));
1002 	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
1003 			AMDGPU_IB_POOL_DIRECT,
1004 			&ib);
1005 	if (r)
1006 		return r;
1007 
1008 	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
1009 	if (r)
1010 		goto error;
1011 
1012 	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
1013 	if (r)
1014 		goto error;
1015 
1016 	r = dma_fence_wait_timeout(fence, false, timeout);
1017 	if (r == 0)
1018 		r = -ETIMEDOUT;
1019 	else if (r > 0)
1020 		r = 0;
1021 
1022 error:
1023 	amdgpu_ib_free(&ib, fence);
1024 	dma_fence_put(fence);
1025 
1026 	return r;
1027 }
1028 
1029 int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1030 {
1031 	struct amdgpu_device *adev = ring->adev;
1032 	long r;
1033 
1034 	if ((amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) &&
1035 	    (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 1))) {
1036 		r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
1037 		if (r)
1038 			goto error;
1039 	}
1040 
1041 	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
1042 
1043 error:
1044 	return r;
1045 }
1046 
1047 enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
1048 {
1049 	switch (ring) {
1050 	case 0:
1051 		return AMDGPU_RING_PRIO_0;
1052 	case 1:
1053 		return AMDGPU_RING_PRIO_1;
1054 	case 2:
1055 		return AMDGPU_RING_PRIO_2;
1056 	default:
1057 		return AMDGPU_RING_PRIO_0;
1058 	}
1059 }
1060 
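/*
 * With PSP front-door loading, register each VCN instance's firmware image in
 * adev->firmware.ucode[] and account for its size.  VCN 4.0.3 and 5.0.1 share
 * a single image across instances, so only the first entry is registered.
 */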
1061 void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
1062 {
1063 	int i;
1064 	unsigned int idx;
1065 
1066 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1067 		const struct common_firmware_header *hdr;
1068 
1069 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1070 			if (adev->vcn.harvest_config & (1 << i))
1071 				continue;
1072 
1073 			hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
1074 			/* currently only support 2 FW instances */
1075 			if (i >= 2) {
1076 				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
1077 				break;
1078 			}
1079 			idx = AMDGPU_UCODE_ID_VCN + i;
1080 			adev->firmware.ucode[idx].ucode_id = idx;
1081 			adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
1082 			adev->firmware.fw_size +=
1083 				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
1084 
1085 			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
1086 			    IP_VERSION(4, 0, 3) ||
1087 			    amdgpu_ip_version(adev, UVD_HWIP, 0) ==
1088 			    IP_VERSION(5, 0, 1))
1089 				break;
1090 		}
1091 	}
1092 }
1093 
1094 /*
1095  * debugfs for mapping vcn firmware log buffer.
1096  */
1097 #if defined(CONFIG_DEBUG_FS)
1098 static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
1099 					     size_t size, loff_t *pos)
1100 {
1101 	struct amdgpu_vcn_inst *vcn;
1102 	void *log_buf;
1103 	volatile struct amdgpu_vcn_fwlog *plog;
1104 	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
1105 	unsigned int read_num[2] = {0};
1106 
1107 	vcn = file_inode(f)->i_private;
1108 	if (!vcn)
1109 		return -ENODEV;
1110 
1111 	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
1112 		return -EFAULT;
1113 
1114 	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1115 
1116 	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
1117 	read_pos = plog->rptr;
1118 	write_pos = plog->wptr;
1119 
1120 	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
1121 		return -EFAULT;
1122 
1123 	if (!size || (read_pos == write_pos))
1124 		return 0;
1125 
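	/*
	 * The log is a ring buffer that starts after the header.  When the
	 * writer has wrapped around (write_pos <= read_pos), read in two
	 * chunks: first from read_pos to the end of the buffer, then from
	 * header_size up to write_pos.
	 */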
1126 	if (write_pos > read_pos) {
1127 		available = write_pos - read_pos;
1128 		read_num[0] = min_t(size_t, size, available);
1129 	} else {
1130 		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
1131 		available = read_num[0] + write_pos - plog->header_size;
1132 		if (size > available)
1133 			read_num[1] = write_pos - plog->header_size;
1134 		else if (size > read_num[0])
1135 			read_num[1] = size - read_num[0];
1136 		else
1137 			read_num[0] = size;
1138 	}
1139 
1140 	for (i = 0; i < 2; i++) {
1141 		if (read_num[i]) {
1142 			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
1143 				read_pos = plog->header_size;
1144 			if (read_num[i] == copy_to_user((buf + read_bytes),
1145 							(log_buf + read_pos), read_num[i]))
1146 				return -EFAULT;
1147 
1148 			read_bytes += read_num[i];
1149 			read_pos += read_num[i];
1150 		}
1151 	}
1152 
1153 	plog->rptr = read_pos;
1154 	*pos += read_bytes;
1155 	return read_bytes;
1156 }
1157 
1158 static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
1159 	.owner = THIS_MODULE,
1160 	.read = amdgpu_debugfs_vcn_fwlog_read,
1161 	.llseek = default_llseek
1162 };
1163 #endif
1164 
1165 void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
1166 				   struct amdgpu_vcn_inst *vcn)
1167 {
1168 #if defined(CONFIG_DEBUG_FS)
1169 	struct drm_minor *minor = adev_to_drm(adev)->primary;
1170 	struct dentry *root = minor->debugfs_root;
1171 	char name[32];
1172 
1173 	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
1174 	debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
1175 				 &amdgpu_debugfs_vcnfwlog_fops,
1176 				 AMDGPU_VCNFW_LOG_SIZE);
1177 #endif
1178 }
1179 
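/*
 * Advertise the firmware log buffer to the VCN firmware through the shared
 * memory region: set the logging flag, point addr_lo/addr_hi at the log
 * buffer placed right after fw_shared, and initialize the log header so that
 * rptr and wptr start past it.
 */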
1180 void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
1181 {
1182 #if defined(CONFIG_DEBUG_FS)
1183 	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
1184 	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1185 	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
1186 	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
1187 	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
1188 							 + vcn->fw_shared.log_offset;
1189 	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
1190 	fw_log->is_enabled = 1;
1191 	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
1192 	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
1193 	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
1194 
1195 	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
1196 	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
1197 	log_buf->rptr = log_buf->header_size;
1198 	log_buf->wptr = log_buf->header_size;
1199 	log_buf->wrapped = 0;
1200 #endif
1201 }
1202 
1203 int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
1204 				struct amdgpu_irq_src *source,
1205 				struct amdgpu_iv_entry *entry)
1206 {
1207 	struct ras_common_if *ras_if = adev->vcn.ras_if;
1208 	struct ras_dispatch_if ih_data = {
1209 		.entry = entry,
1210 	};
1211 
1212 	if (!ras_if)
1213 		return 0;
1214 
1215 	if (!amdgpu_sriov_vf(adev)) {
1216 		ih_data.head = *ras_if;
1217 		amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1218 	} else {
1219 		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
1220 			adev->virt.ops->ras_poison_handler(adev, ras_if->block);
1221 		else
1222 			dev_warn(adev->dev,
1223 				"No ras_poison_handler interface in SRIOV for VCN!\n");
1224 	}
1225 
1226 	return 0;
1227 }
1228 
1229 int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
1230 {
1231 	int r, i;
1232 
1233 	r = amdgpu_ras_block_late_init(adev, ras_block);
1234 	if (r)
1235 		return r;
1236 
1237 	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
1238 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1239 			if (adev->vcn.harvest_config & (1 << i) ||
1240 			    !adev->vcn.inst[i].ras_poison_irq.funcs)
1241 				continue;
1242 
1243 			r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
1244 			if (r)
1245 				goto late_fini;
1246 		}
1247 	}
1248 	return 0;
1249 
1250 late_fini:
1251 	amdgpu_ras_block_late_fini(adev, ras_block);
1252 	return r;
1253 }
1254 
1255 int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
1256 {
1257 	int err;
1258 	struct amdgpu_vcn_ras *ras;
1259 
1260 	if (!adev->vcn.ras)
1261 		return 0;
1262 
1263 	ras = adev->vcn.ras;
1264 	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
1265 	if (err) {
1266 		dev_err(adev->dev, "Failed to register vcn ras block!\n");
1267 		return err;
1268 	}
1269 
1270 	strcpy(ras->ras_block.ras_comm.name, "vcn");
1271 	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
1272 	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
1273 	adev->vcn.ras_if = &ras->ras_block.ras_comm;
1274 
1275 	if (!ras->ras_block.ras_late_init)
1276 		ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
1277 
1278 	return 0;
1279 }
1280 
1281 int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
1282 			       enum AMDGPU_UCODE_ID ucode_id)
1283 {
1284 	struct amdgpu_firmware_info ucode = {
1285 		.ucode_id = (ucode_id ? ucode_id :
1286 			    (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
1287 					AMDGPU_UCODE_ID_VCN0_RAM)),
1288 		.mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
1289 		.ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
1290 			      (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
1291 	};
1292 
1293 	return psp_execute_ip_fw_load(&adev->psp, &ucode);
1294 }
1295 
1296 static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev,
1297 						struct device_attribute *attr,
1298 						char *buf)
1299 {
1300 	struct drm_device *ddev = dev_get_drvdata(dev);
1301 	struct amdgpu_device *adev = drm_to_adev(ddev);
1302 
1303 	if (!adev)
1304 		return -ENODEV;
1305 
1306 	return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset);
1307 }
1308 
1309 static DEVICE_ATTR(vcn_reset_mask, 0444,
1310 		   amdgpu_get_vcn_reset_mask, NULL);
1311 
1312 int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev)
1313 {
1314 	int r = 0;
1315 
1316 	if (adev->vcn.num_vcn_inst) {
1317 		r = device_create_file(adev->dev, &dev_attr_vcn_reset_mask);
1318 		if (r)
1319 			return r;
1320 	}
1321 
1322 	return r;
1323 }
1324 
1325 void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
1326 {
1327 	if (adev->dev->kobj.sd) {
1328 		if (adev->vcn.num_vcn_inst)
1329 			device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
1330 	}
1331 }
1332 
1333 /*
1334  * debugfs to enable/disable vcn job submission to specific core or
1335  * instance. It is created only if the queue type is unified.
1336  */
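/*
 * Example usage (path assumed; the DRM debugfs root is typically
 * /sys/kernel/debug/dri/<minor>):
 *   echo 0x1 > /sys/kernel/debug/dri/0/amdgpu_vcn_sched_mask
 * keeps only instance 0 accepting jobs; reading the file returns the current
 * mask.  At least one bit of the valid instance mask must remain set.
 */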
1337 #if defined(CONFIG_DEBUG_FS)
1338 static int amdgpu_debugfs_vcn_sched_mask_set(void *data, u64 val)
1339 {
1340 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
1341 	u32 i;
1342 	u64 mask;
1343 	struct amdgpu_ring *ring;
1344 
1345 	if (!adev)
1346 		return -ENODEV;
1347 
1348 	mask = (1ULL << adev->vcn.num_vcn_inst) - 1;
1349 	if ((val & mask) == 0)
1350 		return -EINVAL;
1351 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1352 		ring = &adev->vcn.inst[i].ring_enc[0];
1353 		if (val & (1ULL << i))
1354 			ring->sched.ready = true;
1355 		else
1356 			ring->sched.ready = false;
1357 	}
1358 	/* publish sched.ready flag update effective immediately across smp */
1359 	smp_rmb();
1360 	return 0;
1361 }
1362 
1363 static int amdgpu_debugfs_vcn_sched_mask_get(void *data, u64 *val)
1364 {
1365 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
1366 	u32 i;
1367 	u64 mask = 0;
1368 	struct amdgpu_ring *ring;
1369 
1370 	if (!adev)
1371 		return -ENODEV;
1372 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1373 		ring = &adev->vcn.inst[i].ring_enc[0];
1374 		if (ring->sched.ready)
1375 			mask |= 1ULL << i;
1376 	}
1377 	*val = mask;
1378 	return 0;
1379 }
1380 
1381 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_vcn_sched_mask_fops,
1382 			 amdgpu_debugfs_vcn_sched_mask_get,
1383 			 amdgpu_debugfs_vcn_sched_mask_set, "%llx\n");
1384 #endif
1385 
1386 void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
1387 {
1388 #if defined(CONFIG_DEBUG_FS)
1389 	struct drm_minor *minor = adev_to_drm(adev)->primary;
1390 	struct dentry *root = minor->debugfs_root;
1391 	char name[32];
1392 
1393 	if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.using_unified_queue)
1394 		return;
1395 	sprintf(name, "amdgpu_vcn_sched_mask");
1396 	debugfs_create_file(name, 0600, root, adev,
1397 			    &amdgpu_debugfs_vcn_sched_mask_fops);
1398 #endif
1399 }
1400