/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "vcn_v5_0_0.h"

#include <drm/drm_drv.h>

static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
};

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				   enum amd_powergating_state state);
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     struct dpg_pause_state *new_state);
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v5_0_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		/* re-use enc ring as unified ring */
		adev->vcn.inst[i].num_enc_rings = 1;

	vcn_v5_0_0_set_unified_ring_funcs(adev);
	vcn_v5_0_0_set_irq_funcs(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].set_pg_state = vcn_v5_0_0_set_pg_state;

		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}

	return 0;
}

void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
{
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
	uint32_t *ptr;

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}
}

/**
 * vcn_v5_0_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn5_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_vcn_sw_init(adev, i);
		if (r)
			return r;

		amdgpu_vcn_setup_ucode(adev, i);

		r = amdgpu_vcn_resume(adev, i);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		/* VCN UNIFIED TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
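		/* doorbell entry: 2 past the (vcn_ring0_1 << 1) base, with a
		 * stride of 8 doorbells per VCN instance */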
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;

		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_unified_%d", i);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
						AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = 1;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
	}

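	/* Per-queue engine reset is only advertised on bare metal; SR-IOV
	 * guests leave engine resets to the host.
	 */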
	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
	if (!amdgpu_sriov_vf(adev))
		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	vcn_v5_0_0_alloc_ip_dump(adev);

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v5_0_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn5_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;
	}

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_sw_fini(adev, i);
		if (r)
			return r;
	}

	kfree(adev->vcn.ip_dump);

	return 0;
}

/**
 * vcn_v5_0_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_enc[0];

		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
			((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v5_0_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark the ring as not ready any more
 */
static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		cancel_delayed_work_sync(&vinst->idle_work);

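		/* On bare metal, gate the instance on the way down: always for
		 * DPG-capable parts, otherwise only if it is still running
		 * (UVD_STATUS non-zero) and not already gated.
		 */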
		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (vinst->cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, regUVD_STATUS))) {
				vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v5_0_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v5_0_0_hw_fini(ip_block);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(ip_block->adev, i);
		if (r)
			return r;
	}

	return r;
}

/**
 * vcn_v5_0_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
	}

	r = vcn_v5_0_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v5_0_0_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v5_0_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

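	/* The VCPU sees three consecutive cache windows in the instance BO
	 * (firmware image, stack, context) plus one non-cached window for
	 * the fw_shared area.
	 */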
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}

/**
 * vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					  bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
		adev->gfx.config.gb_addr_config, 0, indirect);

	return;
}

/**
 * vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating
 *
 * @vinst: VCN instance
 *
 * Disable static power gating for VCN block
 */
static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data = 0;

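	/* Each ONO power island is switched by writing its PWR_CONFIG field
	 * in UVD_IPX_DLDO_CONFIG and polling the matching PWR_STATUS bit,
	 * which reads 0 once the island is up and 1 once it is gated.
	 */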
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	} else {
		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	}

	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
	return;
}

/**
 * vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating
 *
 * @vinst: VCN instance
 *
 * Enable static power gating for VCN block
 */
static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
	}
	return;
}

/**
 * vcn_v5_0_0_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	return;
}

#if 0
/**
 * vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @vinst: VCN instance
 * @sram_sel: sram select
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
						     uint8_t sram_sel,
						     uint8_t indirect)
{
	return;
}
#endif

/**
 * vcn_v5_0_0_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Enable clock gating for VCN block
 */
static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	return;
}

/**
 * vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);

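	/* In indirect mode the DPG register writes below are staged in the
	 * instance's scratch SRAM and committed to the hardware via PSP at
	 * the end of this sequence (amdgpu_vcn_psp_update_sram()).
	 */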
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);

	vcn_v5_0_0_mc_resume_dpg_mode(vinst, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);

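	/* Ring reset handshake with the firmware: disable RB1 and raise
	 * FW_QUEUE_RING_RESET in fw_shared, zero both ring pointers, then
	 * re-enable the ring and clear the flags once rptr/wptr match.
	 */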
	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);

	return 0;
}

/**
 * vcn_v5_0_0_start - VCN start
 *
 * @vinst: VCN instance
 *
 * Start VCN block
 */
static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int j, k, r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, true, i);

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v5_0_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);

	/* disable VCN power gating */
	vcn_v5_0_0_disable_static_power_gating(vinst);

	/* set VCN status busy */
	tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

	/* setup regUVD_LMI_CTRL */
	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	vcn_v5_0_0_mc_resume(vinst);

	/* VCN global tiling registers */
	WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* unblock VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

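	/* Poll for firmware boot: up to 10 rounds of 100 status reads; after
	 * a failed round the VCPU is pulsed through BLK_RST and the wait
	 * starts over.
	 */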
	for (j = 0; j < 10; ++j) {
		uint32_t status;

		for (k = 0; k < 100; ++k) {
			status = RREG32_SOC15(VCN, i, regUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
			if (amdgpu_emu_mode == 1)
				msleep(1);
		}

		if (amdgpu_emu_mode == 1) {
			r = -1;
			if (status & 2) {
				r = 0;
				break;
			}
		} else {
			r = 0;
			if (status & 2)
				break;

			dev_err(adev->dev,
				"VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
				 UVD_VCPU_CNTL__BLK_RST_MASK,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}
	}

	if (r) {
		dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	ring = &adev->vcn.inst[i].ring_enc[0];
	WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
		     ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		     VCN_RB1_DB_CTRL__EN_MASK);

	WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, i, regUVD_STATUS);

	return 0;
}

/**
 * vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode
 *
 * @vinst: VCN instance
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

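	/* Release any pause first so the firmware can drain the ring and
	 * reach the power-gated state before DPG mode is switched off.
	 */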
	vcn_v5_0_0_pause_dpg_mode(vinst, &state);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);

	return;
}

/**
 * vcn_v5_0_0_stop - VCN stop
 *
 * @vinst: VCN instance
 *
 * Stop VCN block
 */
static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
	uint32_t tmp;
	int r = 0;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		vcn_v5_0_0_stop_dpg_mode(vinst);
		r = 0;
		goto done;
	}

	/* wait for vcn idle */
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		goto done;

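	/* Drain the LMI: wait for outstanding VCPU and engine reads/writes
	 * to retire before stalling the UMC arbiter and resetting the VCPU.
	 */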
	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* disable LMI UMC channel */
	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* block VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__BLK_RST_MASK,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* apply soft reset */
	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

	/* clear status */
	WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

	/* enable VCN power gating */
	vcn_v5_0_0_enable_static_power_gating(vinst);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, i, regUVD_STATUS);

done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, i);

	return r;
}

/**
 * vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode
 *
 * @vinst: VCN instance
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v5_0_0_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v5_0_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
}

/**
 * vcn_v5_0_0_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v5_0_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
}

/**
 * vcn_v5_0_0_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring,
				 unsigned int vmid,
				 struct amdgpu_fence *timedout_fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
	int r;

	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
	r = vcn_v5_0_0_stop(vinst);
	if (r)
		return r;
	r = vcn_v5_0_0_start(vinst);
	if (r)
		return r;
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}

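/* The unified ring reuses the VCN 2.0 encode packet emitters below; only
 * the pointer handling and the reset path are v5.0-specific.
 */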
static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v5_0_0_unified_ring_get_rptr,
	.get_wptr = vcn_v5_0_0_unified_ring_get_wptr,
	.set_wptr = vcn_v5_0_0_unified_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = vcn_v5_0_0_ring_reset,
};

/**
 * vcn_v5_0_0_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_0_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
	}
}

/**
 * vcn_v5_0_0_is_idle - check VCN block is idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block structure
 *
 * Check whether VCN block is idle
 */
static bool vcn_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

/**
 * vcn_v5_0_0_wait_for_idle - wait for VCN block idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Wait for VCN block idle
 */
static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v5_0_0_enable_clock_gating(vinst);
		} else {
			vcn_v5_0_0_disable_clock_gating(vinst);
		}
	}

	return 0;
}

static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				   enum amd_powergating_state state)
{
	int ret = 0;

	if (state == vinst->cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v5_0_0_stop(vinst);
	else
		ret = vcn_v5_0_0_start(vinst);

	if (!ret)
		vinst->cur_state = state;

	return ret;
}

/**
 * vcn_v5_0_0_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
	struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_5_0__SRCID_UVD_POISON:
		amdgpu_vcn_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v5_0_0_irq_funcs = {
	.process = vcn_v5_0_0_process_interrupt,
};

/**
 * vcn_v5_0_0_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs;
	}
}

void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
			       struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
	uint32_t inst_off, is_powered;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * reg_count;
		is_powered = (adev->vcn.ip_dump[inst_off] &
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
					   adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}

void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	uint32_t inst_off;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * reg_count;
		/* mmUVD_POWER_STATUS is always readable and is first element of the array */
		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
		is_powered = (adev->vcn.ip_dump[inst_off] &
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered)
			for (j = 1; j < reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
	}
}

static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
	.name = "vcn_v5_0_0",
	.early_init = vcn_v5_0_0_early_init,
	.sw_init = vcn_v5_0_0_sw_init,
	.sw_fini = vcn_v5_0_0_sw_fini,
	.hw_init = vcn_v5_0_0_hw_init,
	.hw_fini = vcn_v5_0_0_hw_fini,
	.suspend = vcn_v5_0_0_suspend,
	.resume = vcn_v5_0_0_resume,
	.is_idle = vcn_v5_0_0_is_idle,
	.wait_for_idle = vcn_v5_0_0_wait_for_idle,
	.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
	.set_powergating_state = vcn_set_powergating_state,
	.dump_ip_state = vcn_v5_0_0_dump_ip_state,
	.print_ip_state = vcn_v5_0_0_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v5_0_0_ip_funcs,
};