/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "mmsch_v4_0_3.h"

#include "vcn/vcn_4_0_3_offset.h"
#include "vcn/vcn_4_0_3_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX

#define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0	0x48300

static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
};

#define NORMALIZE_VCN_REG_OFFSET(offset) \
		(offset & 0x1FFFF)
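
/*
 * For illustration: the mask keeps the low 17 bits, so an absolute offset
 * in the VCN1 aperture such as 0x48300 collapses into the per-instance
 * register range, e.g. NORMALIZE_VCN_REG_OFFSET(0x48300) == 0x08300.
 */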

static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
		enum amd_powergating_state state);
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
				  int inst_idx, bool indirect);

static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
{
	return (amdgpu_sriov_vf(adev) ||
		(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)));
}

/**
 * vcn_v4_0_3_early_init - set function pointers
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 */
static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* re-use enc ring as unified ring */
	adev->vcn.num_enc_rings = 1;

	vcn_v4_0_3_set_unified_ring_funcs(adev);
	vcn_v4_0_3_set_irq_funcs(adev);
	vcn_v4_0_3_set_ras_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}

static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
	struct amdgpu_vcn4_fw_shared *fw_shared;

	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
	fw_shared->sq.is_enabled = 1;

	if (amdgpu_vcnfw_log)
		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);

	return 0;
}

/**
 * vcn_v4_0_3_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
	uint32_t *ptr;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		vcn_inst = GET_INST(VCN, i);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;

		if (!amdgpu_sriov_vf(adev))
			ring->doorbell_index =
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				9 * vcn_inst;
		else
			ring->doorbell_index =
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				32 * vcn_inst;
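
		/*
		 * Illustration (sketch based on the two strides above): with
		 * base = vcn_ring0_1 << 1, a bare-metal instance N gets
		 * doorbell base + 9 * N, while SR-IOV spaces instances 32
		 * slots apart; e.g. instance 2 lands at base + 18 or
		 * base + 64 respectively.
		 */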

		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
		sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		vcn_v4_0_3_fw_shared_init(adev, i);
	}

	/* TODO: Add queue reset mask when FW fully supports it */
	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
		r = amdgpu_vcn_ras_sw_init(adev);
		if (r) {
			dev_err(adev->dev, "Failed to initialize vcn ras block!\n");
			return r;
		}
	}

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v4_0_3_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	if (drm_dev_enter(&adev->ddev, &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = cpu_to_le32(false);
		}
		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	amdgpu_vcn_sysfs_reset_mask_fini(adev);
	r = amdgpu_vcn_sw_fini(adev);

	kfree(adev->vcn.ip_dump);

	return r;
}

/**
 * vcn_v4_0_3_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v4_0_3_start_sriov(adev);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			ring = &adev->vcn.inst[i].ring_enc[0];
			ring->wptr = 0;
			ring->wptr_old = 0;
			vcn_v4_0_3_unified_ring_set_wptr(ring);
			ring->sched.ready = true;
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			struct amdgpu_vcn4_fw_shared *fw_shared;

			vcn_inst = GET_INST(VCN, i);
			ring = &adev->vcn.inst[i].ring_enc[0];

			if (ring->use_doorbell) {
				adev->nbio.funcs->vcn_doorbell_range(
					adev, ring->use_doorbell,
					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
						9 * vcn_inst,
					adev->vcn.inst[i].aid_id);

				WREG32_SOC15(
					VCN, GET_INST(VCN, ring->me),
					regVCN_RB1_DB_CTRL,
					ring->doorbell_index
							<< VCN_RB1_DB_CTRL__OFFSET__SHIFT |
						VCN_RB1_DB_CTRL__EN_MASK);

				/* Read DB_CTRL to flush the write DB_CTRL command. */
				RREG32_SOC15(
					VCN, GET_INST(VCN, ring->me),
					regVCN_RB1_DB_CTRL);
			}

			/* Re-init fw_shared when RAS fatal error occurred */
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			if (!fw_shared->sq.is_enabled)
				vcn_v4_0_3_fw_shared_init(adev, i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}
	}

	return r;
}

/**
 * vcn_v4_0_3_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
		vcn_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);

	return 0;
}

/**
 * vcn_v4_0_3_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = vcn_v4_0_3_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(ip_block->adev);

	return r;
}

/**
 * vcn_v4_0_3_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_vcn_resume(ip_block->adev);
	if (r)
		return r;

	r = vcn_v4_0_3_hw_init(ip_block);

	return r;
}

/**
 * vcn_v4_0_3_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t offset, size, vcn_inst;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	vcn_inst = GET_INST(VCN, inst_idx);
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(
			VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
				 .tmr_mc_addr_lo));
		WREG32_SOC15(
			VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
				 .tmr_mc_addr_hi));
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		WREG32_SOC15(VCN, vcn_inst,
			     regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1,
		     AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				   AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				   AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2,
		     AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}
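
/*
 * Sketch of the VCPU address map programmed above (non-PSP load case,
 * derived from the offsets used):
 *
 *	gpu_addr + 0                               cache window 0: fw image
 *	gpu_addr + offset                          cache window 1: stack
 *	gpu_addr + offset + AMDGPU_VCN_STACK_SIZE  cache window 2: context
 *	fw_shared.gpu_addr                         non-cache window 0: fw_shared
 */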

/**
 * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t data;
	int vcn_inst;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE, data);
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
						     int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t data;
	int vcn_inst;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared =
		adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	int vcn_inst;
	uint32_t tmp;

	vcn_inst = GET_INST(VCN, inst_idx);
	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);

	if (indirect) {
		DRM_DEV_DEBUG(adev->dev, "VCN %d start: on AID %d",
			inst_idx, adev->vcn.inst[inst_idx].aid_id);
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
				(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
		/* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */
		WREG32_SOC15_DPG_MODE(inst_idx, 0xDEADBEEF,
			adev->vcn.inst[inst_idx].aid_id, 0, true);
	}

	/* disable clock gating */
	vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);

	vcn_v4_0_3_enable_ras(adev, inst_idx, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
		     upper_32_bits(ring->gpu_addr));

	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
		     ring->ring_size / sizeof(uint32_t));
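	/*
	 * Note: UVD_RB_SIZE is programmed in dwords; e.g. a hypothetical
	 * 64 KiB ring would be written as 0x4000.
	 */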

	/* resetting ring, fw should not check RB ring */
	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	/* resetting done, fw can check RB ring */
	fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	return 0;
}

static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
{
	int i, vcn_inst;
	struct amdgpu_ring *ring_enc;
	uint64_t cache_addr;
	uint64_t rb_enc_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;
	uint32_t init_status;
	uint32_t enabled_vcn;

	struct mmsch_v4_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v4_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v4_0_cmd_end end = { {0} };
	struct mmsch_v4_0_3_init_header header;

	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	volatile struct amdgpu_fw_shared_rb_setup *rb_setup;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		vcn_inst = GET_INST(VCN, i);

		vcn_v4_0_3_fw_shared_init(adev, vcn_inst);

		memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header));
		header.version = MMSCH_VERSION;
		header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2;

		table_loc = (uint32_t *)table->cpu_addr;
		table_loc += header.total_size;

		table_size = 0;

		MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);

			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);

			offset = 0;
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_SIZE0),
			cache_size);

		cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset;
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_OFFSET1), 0);
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr));

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr));

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_OFFSET2), 0);

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE);

		fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr;
		rb_setup = &fw_shared->rb_setup;

		ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0];
		ring_enc->wptr = 0;
		rb_enc_addr = ring_enc->gpu_addr;

		rb_setup->is_rb_enabled_flags |= RB_ENABLED;
		rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
		rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
		rb_setup->rb_size = ring_enc->ring_size / 4;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_NONCACHE_SIZE0),
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
		MMSCH_V4_0_INSERT_END();
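
		/*
		 * Sketch of the descriptor just built: the mm_table now holds
		 * an mmsch_v4_0_3_init_header followed by table_size dwords of
		 * direct-write/read-modify-write commands, terminated by an
		 * END command for MMSCH to consume.
		 */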

		header.vcn0.init_status = 0;
		header.vcn0.table_offset = header.total_size;
		header.vcn0.table_size = table_size;
		header.total_size += table_size;

		/* Send init table to mmsch */
		size = sizeof(struct mmsch_v4_0_3_init_header);
		table_loc = (uint32_t *)table->cpu_addr;
		memcpy((void *)table_loc, &header, size);

		ctx_addr = table->gpu_addr;
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

		tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID);
		tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
		tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp);

		size = header.total_size;
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size);

		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);

		param = 0x00000001;
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
		tmp = 0;
		timeout = 1000;
		resp = 0;
		expected = MMSCH_VF_MAILBOX_RESP__OK;
		while (resp != expected) {
			resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
			if (resp != 0)
				break;

			udelay(10);
			tmp = tmp + 10;
			if (tmp >= timeout) {
				DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
					" waiting for regMMSCH_VF_MAILBOX_RESP "\
					"(expected=0x%08x, readback=0x%08x)\n",
					tmp, expected, resp);
				return -EBUSY;
			}
		}
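		/*
		 * The poll above allows up to ~1 ms (100 reads spaced 10 us
		 * apart); a non-zero response other than OK falls through to
		 * the init status check below.
		 */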

		enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
		init_status = ((struct mmsch_v4_0_3_init_header *)(table_loc))->vcn0.init_status;
		if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
		    && init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
			DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
				"status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
		}
	}

	return 0;
}

/**
 * vcn_v4_0_3_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v4_0_3_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	int i, j, k, r, vcn_inst;
	uint32_t tmp;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_vcn(adev, true, i);
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		vcn_inst = GET_INST(VCN, i);
		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
			UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v4_0_3_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__CLK_EN_MASK,
			 ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
			     tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
				     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
				     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
				     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup regUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
			     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
			     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup UVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
			     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v4_0_3_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);
		WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
			 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, vcn_inst,
						      regUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_DEV_ERROR(adev->dev,
				"VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
						  regUVD_VCPU_CNTL),
				 UVD_VCPU_CNTL__BLK_RST_MASK,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
						  regUVD_VCPU_CNTL),
				 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
			 UVD_MASTINT_EN__VCPU_EN_MASK,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
			 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		ring = &adev->vcn.inst[i].ring_enc[0];
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
			     lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
			     upper_32_bits(ring->gpu_addr));

		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
			     ring->ring_size / sizeof(uint32_t));

		/* resetting ring, fw should not check RB ring */
		tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
		tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
		WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);

		tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
		tmp |= VCN_RB_ENABLE__RB_EN_MASK;
		WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

		ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
		fw_shared->sq.queue_mode &=
			cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));

	}
	return 0;
}

/**
 * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;
	int vcn_inst;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	return 0;
}

/**
 * vcn_v4_0_3_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	int i, r = 0, vcn_inst;
	uint32_t tmp;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		vcn_inst = GET_INST(VCN, i);

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			vcn_v4_0_3_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
				       UVD_STATUS__IDLE, 0x7);
		if (r)
			goto Done;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
				       tmp);
		if (r)
			goto Done;

		/* stall UMC channel */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
				       tmp);
		if (r)
			goto Done;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
			 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* put VCPU into reset */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__BLK_RST_MASK,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
			 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* reset LMI UMC/LMI/VCPU */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		/* clear VCN status */
		WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v4_0_3_enable_clock_gating(adev, i);
	}
Done:
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_vcn(adev, false, i);
	}

	return 0;
}

/**
 * vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
				     struct dpg_pause_state *new_state)
{

	return 0;
}

/**
 * vcn_v4_0_3_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
}

/**
 * vcn_v4_0_3_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, GET_INST(VCN, ring->me),
				    regUVD_RB_WPTR);
}

static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					      uint32_t val, uint32_t mask)
{
	/* Use normalized offsets when required */
	if (vcn_v4_0_3_normalizn_reqd(ring->adev))
		reg = NORMALIZE_VCN_REG_OFFSET(reg);

	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}
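
/*
 * Sketch of the packet layout used by the emit helpers here: REG_WAIT is
 * four dwords, { VCN_ENC_CMD_REG_WAIT, reg << 2, mask, val }, and REG_WRITE
 * is three, { VCN_ENC_CMD_REG_WRITE, reg << 2, val }; the << 2 shift turns
 * the dword register offset into a byte address.
 */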

static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	/* Use normalized offsets when required */
	if (vcn_v4_0_3_normalizn_reqd(ring->adev))
		reg = NORMALIZE_VCN_REG_OFFSET(reg);

	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					      unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v4_0_3_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					  vmid * hub->ctx_addr_distance,
					  lower_32_bits(pd_addr), 0xffffffff);
}

static void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* VCN engine access for HDP flush doesn't work when RRMT is enabled.
	 * This is a workaround to avoid any HDP flush through VCN ring.
	 */
}

/**
 * vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
			     lower_32_bits(ring->wptr));
	}
}
1531 static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
1532 .type = AMDGPU_RING_TYPE_VCN_ENC,
1533 .align_mask = 0x3f,
1534 .nop = VCN_ENC_CMD_NO_OP,
1535 .get_rptr = vcn_v4_0_3_unified_ring_get_rptr,
1536 .get_wptr = vcn_v4_0_3_unified_ring_get_wptr,
1537 .set_wptr = vcn_v4_0_3_unified_ring_set_wptr,
1538 .emit_frame_size =
1539 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1540 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1541 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1542 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1543 1, /* vcn_v2_0_enc_ring_insert_end */
1544 .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1545 .emit_ib = vcn_v2_0_enc_ring_emit_ib,
1546 .emit_fence = vcn_v2_0_enc_ring_emit_fence,
1547 .emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
1548 .emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
1549 .test_ring = amdgpu_vcn_enc_ring_test_ring,
1550 .test_ib = amdgpu_vcn_unified_ring_test_ib,
1551 .insert_nop = amdgpu_ring_insert_nop,
1552 .insert_end = vcn_v2_0_enc_ring_insert_end,
1553 .pad_ib = amdgpu_ring_generic_pad_ib,
1554 .begin_use = amdgpu_vcn_ring_begin_use,
1555 .end_use = amdgpu_vcn_ring_end_use,
1556 .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
1557 .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
1558 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1559 };

/**
 * vcn_v4_0_3_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i, vcn_inst;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
		vcn_inst = GET_INST(VCN, i);
		adev->vcn.inst[i].aid_id =
			vcn_inst / adev->vcn.num_inst_per_aid;
	}
}
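
/*
 * Worked example (hypothetical topology): with num_inst_per_aid == 4, a
 * logical instance i whose physical id GET_INST(VCN, i) is 5 lands on
 * aid_id = 5 / 4 = 1, i.e. the second AID. The integer division groups
 * consecutive physical instances onto the same AID.
 */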

/**
 * vcn_v4_0_3_is_idle - check whether the VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether the VCN block is idle
 */
static bool vcn_v4_0_3_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) ==
			UVD_STATUS__IDLE);
	}

	return ret;
}

/**
 * vcn_v4_0_3_wait_for_idle - wait for VCN block idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Wait for VCN block idle
 */
static int vcn_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS,
					 UVD_STATUS__IDLE, UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}
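
/*
 * SOC15_WAIT_ON_RREG (soc15_common.h) polls until (value & mask) matches
 * the expected value or the adev->usec_timeout budget runs out, returning
 * -ETIMEDOUT in the latter case. Conceptually (sketch only, not the exact
 * macro body):
 *
 *	for (timeout = adev->usec_timeout; timeout; --timeout) {
 *		if ((RREG32_SOC15(VCN, inst, regUVD_STATUS) &
 *		     UVD_STATUS__IDLE) == UVD_STATUS__IDLE)
 *			return 0;
 *		udelay(1);
 *	}
 *	return -ETIMEDOUT;
 */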

/**
 * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (enable) {
			if (RREG32_SOC15(VCN, GET_INST(VCN, i),
					 regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v4_0_3_enable_clock_gating(adev, i);
		} else {
			vcn_v4_0_3_disable_clock_gating(adev, i);
		}
	}
	return 0;
}

/**
 * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
					    enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	/* For SR-IOV, the guest should not control VCN power gating;
	 * the MMSCH firmware controls both power gating and clock gating,
	 * so the guest should avoid touching CGC and PG.
	 */
	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v4_0_3_stop(adev);
	else
		ret = vcn_v4_0_3_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}
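
/*
 * Usage note: cur_state is only updated when the transition succeeds, so
 * a failed vcn_v4_0_3_start()/stop() leaves the cached state untouched
 * and a later call retries the same transition instead of being
 * short-circuited by the "state == adev->vcn.cur_state" check above:
 *
 *	requested state		action			cur_state update
 *	AMD_PG_STATE_GATE	vcn_v4_0_3_stop()	only if stop succeeds
 *	AMD_PG_STATE_UNGATE	vcn_v4_0_3_start()	only if start succeeds
 */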

/**
 * vcn_v4_0_3_set_interrupt_state - set VCN block interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block interrupt state
 */
static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned int type,
					  enum amdgpu_interrupt_state state)
{
	return 0;
}

/**
 * vcn_v4_0_3_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	uint32_t i, inst;

	i = node_id_to_phys_map[entry->node_id];

	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");

	for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
		if (adev->vcn.inst[inst].aid_id == i)
			break;

	if (inst >= adev->vcn.num_vcn_inst) {
		dev_WARN_ONCE(adev->dev, 1,
			      "Interrupt received for unknown VCN instance %d",
			      entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			      entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
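
/*
 * Worked example (hypothetical numbers): suppose node_id_to_phys_map maps
 * entry->node_id == 4 to AID 1, and instances 2 and 3 have aid_id == 1.
 * The loop above then selects inst == 2, the first instance on that AID,
 * and its unified ring receives the fence processing. If no instance
 * matches, the dev_WARN_ONCE() path fires and the interrupt is dropped.
 */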

static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
	.set = vcn_v4_0_3_set_interrupt_state,
	.process = vcn_v4_0_3_process_interrupt,
};

/**
 * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		/* adev->vcn.inst points at the first instance, so every
		 * iteration bumps inst[0]'s irq type count: all instances
		 * share the first instance's irq source.
		 */
		adev->vcn.inst->irq.num_types++;
	}
	adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
}

static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
	uint32_t inst_off, is_powered;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * reg_count;
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
					   adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}
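
/*
 * Example output shape (illustrative values only):
 *
 *	num_instances:2
 *
 *	Active Instance:VCN0
 *	regUVD_POWER_STATUS                                	 0x00000000
 *	regUVD_STATUS                                      	 0x00000002
 *	...
 *
 *	Inactive Instance:VCN1
 *
 * Only the snapshot cached by vcn_v4_0_3_dump_ip_state() is printed here;
 * no register is read at print time.
 */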

static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	uint32_t inst_off, inst_id;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_id = GET_INST(VCN, i);
		inst_off = i * reg_count;
		/* mmUVD_POWER_STATUS is always readable and is the first element of the array */
		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS);
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered)
			for (j = 1; j < reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j],
									   inst_id));
	}
}
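
/*
 * Design note: regUVD_POWER_STATUS is sampled first because it is the one
 * register in the list that is safe to read regardless of power state;
 * the remaining registers are only read when the status field != 1 (the
 * value 1 appears to denote the powered-down state here), presumably to
 * avoid touching a gated instance. The snapshot is taken at dump time
 * (e.g. during reset) and rendered later by vcn_v4_0_3_print_ip_state().
 */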

static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
	.name = "vcn_v4_0_3",
	.early_init = vcn_v4_0_3_early_init,
	.sw_init = vcn_v4_0_3_sw_init,
	.sw_fini = vcn_v4_0_3_sw_fini,
	.hw_init = vcn_v4_0_3_hw_init,
	.hw_fini = vcn_v4_0_3_hw_fini,
	.suspend = vcn_v4_0_3_suspend,
	.resume = vcn_v4_0_3_resume,
	.is_idle = vcn_v4_0_3_is_idle,
	.wait_for_idle = vcn_v4_0_3_wait_for_idle,
	.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
	.set_powergating_state = vcn_v4_0_3_set_powergating_state,
	.dump_ip_state = vcn_v4_0_3_dump_ip_state,
	.print_ip_state = vcn_v4_0_3_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 4,
	.minor = 0,
	.rev = 3,
	.funcs = &vcn_v4_0_3_ip_funcs,
};

static const struct amdgpu_ras_err_status_reg_entry vcn_v4_0_3_ue_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(VCN, 0, regVCN_UE_ERR_STATUS_LO_VIDD, regVCN_UE_ERR_STATUS_HI_VIDD),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "VIDD"},
	{AMDGPU_RAS_REG_ENTRY(VCN, 0, regVCN_UE_ERR_STATUS_LO_VIDV, regVCN_UE_ERR_STATUS_HI_VIDV),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "VIDV"},
};

static void vcn_v4_0_3_inst_query_ras_error_count(struct amdgpu_device *adev,
						  uint32_t vcn_inst,
						  void *ras_err_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;

	/* VCN v4_0_3 only supports querying uncorrectable errors */
	amdgpu_ras_inst_query_ras_error_count(adev,
					      vcn_v4_0_3_ue_reg_list,
					      ARRAY_SIZE(vcn_v4_0_3_ue_reg_list),
					      NULL, 0, GET_INST(VCN, vcn_inst),
					      AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					      &err_data->ue_count);
}

static void vcn_v4_0_3_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_err_status)
{
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
		dev_warn(adev->dev, "VCN RAS is not supported\n");
		return;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		vcn_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status);
}

static void vcn_v4_0_3_inst_reset_ras_error_count(struct amdgpu_device *adev,
						  uint32_t vcn_inst)
{
	amdgpu_ras_inst_reset_ras_error_count(adev,
					      vcn_v4_0_3_ue_reg_list,
					      ARRAY_SIZE(vcn_v4_0_3_ue_reg_list),
					      GET_INST(VCN, vcn_inst));
}

static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
		dev_warn(adev->dev, "VCN RAS is not supported\n");
		return;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		vcn_v4_0_3_inst_reset_ras_error_count(adev, i);
}

static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
	.query_ras_error_count = vcn_v4_0_3_query_ras_error_count,
	.reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
};

static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				      enum aca_smu_type type, void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];
	switch (type) {
	case ACA_SMU_TYPE_UE:
		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
						     1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
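
/*
 * Decode sketch (illustrative restatement of the switch above): for
 * correctable errors the bank's MISC0 register carries the error count,
 * extracted via ACA_REG__MISC0__ERRCNT(); uncorrectable errors are logged
 * with a fixed count of one:
 *
 *	misc0 = bank->regs[ACA_REG_IDX_MISC0];
 *	count = (type == ACA_SMU_TYPE_UE) ? 1ULL
 *					  : ACA_REG__MISC0__ERRCNT(misc0);
 */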

/* error codes reference the SMU driver i/f header file */
static int vcn_v4_0_3_err_codes[] = {
	14, 15, /* VCN */
};

static bool vcn_v4_0_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					 enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);

	if (instlo != mmSMNAID_AID0_MCA_SMU)
		return false;

	if (aca_bank_check_error_codes(handle->adev, bank,
				       vcn_v4_0_3_err_codes,
				       ARRAY_SIZE(vcn_v4_0_3_err_codes)))
		return false;

	return true;
}
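
/*
 * Mask arithmetic: GENMASK(31, 1) == 0xfffffffe, so the check above
 * discards bit 0 of INSTANCEIDLO before comparing against
 * mmSMNAID_AID0_MCA_SMU. Banks whose masked instance id does not match
 * AID0's SMU instance are filtered out, as are banks whose error codes
 * are not listed in vcn_v4_0_3_err_codes.
 */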

static const struct aca_bank_ops vcn_v4_0_3_aca_bank_ops = {
	.aca_bank_parser = vcn_v4_0_3_aca_bank_parser,
	.aca_bank_is_valid = vcn_v4_0_3_aca_bank_is_valid,
};

static const struct aca_info vcn_v4_0_3_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK,
	.bank_ops = &vcn_v4_0_3_aca_bank_ops,
};

static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
				&vcn_v4_0_3_aca_info, NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

static struct amdgpu_vcn_ras vcn_v4_0_3_ras = {
	.ras_block = {
		.hw_ops = &vcn_v4_0_3_ras_hw_ops,
		.ras_late_init = vcn_v4_0_3_ras_late_init,
	},
};

static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ras = &vcn_v4_0_3_ras;
}

static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
				  int inst_idx, bool indirect)
{
	uint32_t tmp;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
		return;

	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regVCN_RAS_CNTL),
			      tmp, 0, indirect);

	tmp = UVD_VCPU_INT_EN2__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_VCPU_INT_EN2),
			      tmp, 0, indirect);

	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_SYS_INT_EN),
			      tmp, 0, indirect);
}