1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/firmware.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_cs.h"
28 #include "amdgpu_vcn.h"
29 #include "amdgpu_pm.h"
30 #include "soc15.h"
31 #include "soc15d.h"
32 #include "soc15_common.h"
33
34 #include "vcn/vcn_1_0_offset.h"
35 #include "vcn/vcn_1_0_sh_mask.h"
36 #include "mmhub/mmhub_9_1_offset.h"
37 #include "mmhub/mmhub_9_1_sh_mask.h"
38
39 #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
40 #include "jpeg_v1_0.h"
41 #include "vcn_v1_0.h"
42
43 #define mmUVD_RBC_XX_IB_REG_CHECK_1_0 0x05ab
44 #define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX 1
45 #define mmUVD_REG_XX_MASK_1_0 0x05ac
46 #define mmUVD_REG_XX_MASK_1_0_BASE_IDX 1
47
48 static const struct amdgpu_hwip_reg_entry vcn_reg_list_1_0[] = {
49 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
50 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
51 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
52 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
53 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
54 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
55 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
56 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
57 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
58 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
59 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
60 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
61 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
62 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
63 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
64 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
65 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
66 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
67 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
68 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
69 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
70 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
71 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
72 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
73 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
74 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
75 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
76 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
77 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
78 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
79 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
80 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
81 SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
82 };
83
84 static int vcn_v1_0_stop(struct amdgpu_vcn_inst *vinst);
85 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
86 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
87 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
88 static int vcn_v1_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
89 enum amd_powergating_state state);
90 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
91 struct dpg_pause_state *new_state);
92
93 static void vcn_v1_0_idle_work_handler(struct work_struct *work);
94 static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
95
96 /**
97 * vcn_v1_0_early_init - set function pointers and load microcode
98 *
99 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
100 *
101 * Set ring and irq function pointers
102 * Load microcode from filesystem
103 */
104 static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block)
105 {
106 struct amdgpu_device *adev = ip_block->adev;
107
108 adev->vcn.inst[0].num_enc_rings = 2;
109 adev->vcn.inst[0].set_pg_state = vcn_v1_0_set_pg_state;
110
111 vcn_v1_0_set_dec_ring_funcs(adev);
112 vcn_v1_0_set_enc_ring_funcs(adev);
113 vcn_v1_0_set_irq_funcs(adev);
114
115 jpeg_v1_0_early_init(ip_block);
116
117 return amdgpu_vcn_early_init(adev, 0);
118 }
119
120 /**
121 * vcn_v1_0_sw_init - sw init for VCN block
122 *
123 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
124 *
125 * Load firmware and sw initialization
126 */
127 static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
128 {
129 struct amdgpu_ring *ring;
130 int i, r;
131 uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
132 uint32_t *ptr;
133 struct amdgpu_device *adev = ip_block->adev;
134
135 /* VCN DEC TRAP */
136 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
137 VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
138 if (r)
139 return r;
140
141 /* VCN ENC TRAP */
142 for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
143 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
144 &adev->vcn.inst->irq);
145 if (r)
146 return r;
147 }
148
149 r = amdgpu_vcn_sw_init(adev, 0);
150 if (r)
151 return r;
152
153 /* Override the work func */
154 adev->vcn.inst[0].idle_work.work.func = vcn_v1_0_idle_work_handler;
155
156 amdgpu_vcn_setup_ucode(adev, 0);
157
158 r = amdgpu_vcn_resume(adev, 0);
159 if (r)
160 return r;
161
162 ring = &adev->vcn.inst->ring_dec;
163 ring->vm_hub = AMDGPU_MMHUB0(0);
164 sprintf(ring->name, "vcn_dec");
165 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
166 AMDGPU_RING_PRIO_DEFAULT, NULL);
167 if (r)
168 return r;
169
170 adev->vcn.inst[0].internal.scratch9 = adev->vcn.inst->external.scratch9 =
171 SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
172 adev->vcn.inst[0].internal.data0 = adev->vcn.inst->external.data0 =
173 SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
174 adev->vcn.inst[0].internal.data1 = adev->vcn.inst->external.data1 =
175 SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
176 adev->vcn.inst[0].internal.cmd = adev->vcn.inst->external.cmd =
177 SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
178 adev->vcn.inst[0].internal.nop = adev->vcn.inst->external.nop =
179 SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
180
181 for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
182 enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
183
184 ring = &adev->vcn.inst->ring_enc[i];
185 ring->vm_hub = AMDGPU_MMHUB0(0);
186 sprintf(ring->name, "vcn_enc%d", i);
187 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
188 hw_prio, NULL);
189 if (r)
190 return r;
191 }
192
193 adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
194
195 if (amdgpu_vcnfw_log) {
196 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
197
198 fw_shared->present_flag_0 = 0;
199 amdgpu_vcn_fwlog_init(adev->vcn.inst);
200 }
201
202 r = jpeg_v1_0_sw_init(ip_block);
203
204 /* Allocate memory for VCN IP Dump buffer */
205 ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
206 if (!ptr) {
207 DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
208 adev->vcn.ip_dump = NULL;
209 } else {
210 adev->vcn.ip_dump = ptr;
211 }
212 return r;
213 }
214
215 /**
216 * vcn_v1_0_sw_fini - sw fini for VCN block
217 *
218 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
219 *
220 * VCN suspend and free up sw allocation
221 */
222 static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
223 {
224 int r;
225 struct amdgpu_device *adev = ip_block->adev;
226
227 r = amdgpu_vcn_suspend(adev, 0);
228 if (r)
229 return r;
230
231 jpeg_v1_0_sw_fini(ip_block);
232
233 r = amdgpu_vcn_sw_fini(adev, 0);
234
235 kfree(adev->vcn.ip_dump);
236
237 return r;
238 }
239
240 /**
241 * vcn_v1_0_hw_init - start and test VCN block
242 *
243 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
244 *
245 * Initialize the hardware, boot up the VCPU and do some testing
246 */
247 static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
248 {
249 struct amdgpu_device *adev = ip_block->adev;
250 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
251 int i, r;
252
253 r = amdgpu_ring_test_helper(ring);
254 if (r)
255 return r;
256
257 for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
258 ring = &adev->vcn.inst->ring_enc[i];
259 r = amdgpu_ring_test_helper(ring);
260 if (r)
261 return r;
262 }
263
264 ring = adev->jpeg.inst->ring_dec;
265 r = amdgpu_ring_test_helper(ring);
266
267 return r;
268 }
269
270 /**
271 * vcn_v1_0_hw_fini - stop the hardware block
272 *
273 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
274 *
275 * Stop the VCN block, mark ring as not ready any more
276 */
277 static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
278 {
279 struct amdgpu_device *adev = ip_block->adev;
280 struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
281
282 cancel_delayed_work_sync(&vinst->idle_work);
283
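	/* power gate the block if dynamic power gating is supported, or if it
	 * is still ungated and UVD_STATUS reports it as running
	 */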
284 if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
285 (vinst->cur_state != AMD_PG_STATE_GATE &&
286 RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
287 vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
288 }
289
290 return 0;
291 }
292
293 /**
294 * vcn_v1_0_suspend - suspend VCN block
295 *
296 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
297 *
298 * HW fini and suspend VCN block
299 */
300 static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
301 {
302 int r;
303 struct amdgpu_device *adev = ip_block->adev;
304 bool idle_work_unexecuted;
305
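	/* cancel_delayed_work_sync() returns true if the idle worker was still
	 * pending; it never ran, so drop the VCN power request here instead
	 */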
306 idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
307 if (idle_work_unexecuted) {
308 if (adev->pm.dpm_enabled)
309 amdgpu_dpm_enable_vcn(adev, false, 0);
310 }
311
312 r = vcn_v1_0_hw_fini(ip_block);
313 if (r)
314 return r;
315
316 r = amdgpu_vcn_suspend(adev, 0);
317
318 return r;
319 }
320
321 /**
322 * vcn_v1_0_resume - resume VCN block
323 *
324 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
325 *
326 * Resume firmware and hw init VCN block
327 */
328 static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block)
329 {
330 int r;
331
332 r = amdgpu_vcn_resume(ip_block->adev, 0);
333 if (r)
334 return r;
335
336 r = vcn_v1_0_hw_init(ip_block);
337
338 return r;
339 }
340
341 /**
342 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
343 *
344 * @vinst: VCN instance
345 *
346 * Let the VCN memory controller know its offsets
347 */
348 static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_vcn_inst *vinst)
349 {
350 struct amdgpu_device *adev = vinst->adev;
351 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
352 uint32_t offset;
353
354 /* cache window 0: fw */
355 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
356 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
357 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
358 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
359 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
360 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
361 offset = 0;
362 } else {
363 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
364 lower_32_bits(adev->vcn.inst->gpu_addr));
365 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
366 upper_32_bits(adev->vcn.inst->gpu_addr));
367 offset = size;
368 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
369 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
370 }
371
372 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
373
374 /* cache window 1: stack */
375 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
376 lower_32_bits(adev->vcn.inst->gpu_addr + offset));
377 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
378 upper_32_bits(adev->vcn.inst->gpu_addr + offset));
379 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
380 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
381
382 /* cache window 2: context */
383 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
384 lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
385 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
386 upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
387 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
388 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
389
390 WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
391 adev->gfx.config.gb_addr_config);
392 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
393 adev->gfx.config.gb_addr_config);
394 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
395 adev->gfx.config.gb_addr_config);
396 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
397 adev->gfx.config.gb_addr_config);
398 WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
399 adev->gfx.config.gb_addr_config);
400 WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
401 adev->gfx.config.gb_addr_config);
402 WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
403 adev->gfx.config.gb_addr_config);
404 WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
405 adev->gfx.config.gb_addr_config);
406 WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
407 adev->gfx.config.gb_addr_config);
408 WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
409 adev->gfx.config.gb_addr_config);
410 WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
411 adev->gfx.config.gb_addr_config);
412 WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
413 adev->gfx.config.gb_addr_config);
414 }
415
416 static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst)
417 {
418 struct amdgpu_device *adev = vinst->adev;
419 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
420 uint32_t offset;
421
422 /* cache window 0: fw */
423 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
424 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
425 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
426 0xFFFFFFFF, 0);
427 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
428 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
429 0xFFFFFFFF, 0);
430 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
431 0xFFFFFFFF, 0);
432 offset = 0;
433 } else {
434 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
435 lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
436 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
437 upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
438 offset = size;
439 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
440 AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
441 }
442
443 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);
444
445 /* cache window 1: stack */
446 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
447 lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
448 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
449 upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
450 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
451 0xFFFFFFFF, 0);
452 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
453 0xFFFFFFFF, 0);
454
455 /* cache window 2: context */
456 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
457 lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
458 0xFFFFFFFF, 0);
459 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
460 upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
461 0xFFFFFFFF, 0);
462 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
463 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
464 0xFFFFFFFF, 0);
465
466 /* VCN global tiling registers */
467 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
468 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
469 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
470 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
471 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
472 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
473 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
474 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
475 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
476 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
477 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
478 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
479 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
480 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
481 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
482 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
483 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
484 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
485 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
486 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
487 }
488
489 /**
490 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
491 *
492 * @vinst: VCN instance
493 *
494 * Disable clock gating for VCN block
495 */
496 static void vcn_v1_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
497 {
498 struct amdgpu_device *adev = vinst->adev;
499 uint32_t data;
500
501 /* JPEG disable CGC */
502 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
503
504 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
505 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
506 else
507 data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
508
509 data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
510 data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
511 WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
512
513 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
514 data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
515 WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
516
517 /* UVD disable CGC */
518 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
519 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
520 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
521 else
522 data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
523
524 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
525 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
526 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
527
528 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
529 data &= ~(UVD_CGC_GATE__SYS_MASK
530 | UVD_CGC_GATE__UDEC_MASK
531 | UVD_CGC_GATE__MPEG2_MASK
532 | UVD_CGC_GATE__REGS_MASK
533 | UVD_CGC_GATE__RBC_MASK
534 | UVD_CGC_GATE__LMI_MC_MASK
535 | UVD_CGC_GATE__LMI_UMC_MASK
536 | UVD_CGC_GATE__IDCT_MASK
537 | UVD_CGC_GATE__MPRD_MASK
538 | UVD_CGC_GATE__MPC_MASK
539 | UVD_CGC_GATE__LBSI_MASK
540 | UVD_CGC_GATE__LRBBM_MASK
541 | UVD_CGC_GATE__UDEC_RE_MASK
542 | UVD_CGC_GATE__UDEC_CM_MASK
543 | UVD_CGC_GATE__UDEC_IT_MASK
544 | UVD_CGC_GATE__UDEC_DB_MASK
545 | UVD_CGC_GATE__UDEC_MP_MASK
546 | UVD_CGC_GATE__WCB_MASK
547 | UVD_CGC_GATE__VCPU_MASK
548 | UVD_CGC_GATE__SCPU_MASK);
549 WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);
550
551 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
552 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
553 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
554 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
555 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
556 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
557 | UVD_CGC_CTRL__SYS_MODE_MASK
558 | UVD_CGC_CTRL__UDEC_MODE_MASK
559 | UVD_CGC_CTRL__MPEG2_MODE_MASK
560 | UVD_CGC_CTRL__REGS_MODE_MASK
561 | UVD_CGC_CTRL__RBC_MODE_MASK
562 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
563 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
564 | UVD_CGC_CTRL__IDCT_MODE_MASK
565 | UVD_CGC_CTRL__MPRD_MODE_MASK
566 | UVD_CGC_CTRL__MPC_MODE_MASK
567 | UVD_CGC_CTRL__LBSI_MODE_MASK
568 | UVD_CGC_CTRL__LRBBM_MODE_MASK
569 | UVD_CGC_CTRL__WCB_MODE_MASK
570 | UVD_CGC_CTRL__VCPU_MODE_MASK
571 | UVD_CGC_CTRL__SCPU_MODE_MASK);
572 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
573
574 /* turn on */
575 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
576 data |= (UVD_SUVD_CGC_GATE__SRE_MASK
577 | UVD_SUVD_CGC_GATE__SIT_MASK
578 | UVD_SUVD_CGC_GATE__SMP_MASK
579 | UVD_SUVD_CGC_GATE__SCM_MASK
580 | UVD_SUVD_CGC_GATE__SDB_MASK
581 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
582 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
583 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
584 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
585 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
586 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
587 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
588 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
589 | UVD_SUVD_CGC_GATE__SCLR_MASK
590 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
591 | UVD_SUVD_CGC_GATE__ENT_MASK
592 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
593 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
594 | UVD_SUVD_CGC_GATE__SITE_MASK
595 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
596 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
597 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
598 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
599 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
600 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);
601
602 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
603 data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
604 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
605 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
606 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
607 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
608 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
609 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
610 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
611 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
612 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
613 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
614 }
615
616 /**
617 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
618 *
619 * @vinst: Pointer to the VCN instance structure
620 *
621 * Enable clock gating for VCN block
622 */
623 static void vcn_v1_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
624 {
625 struct amdgpu_device *adev = vinst->adev;
626 uint32_t data = 0;
627
628 /* enable JPEG CGC */
629 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
630 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
631 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
632 else
633 data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
634 data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
635 data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
636 WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
637
638 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
639 data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
640 WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
641
642 /* enable UVD CGC */
643 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
644 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
645 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
646 else
647 data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
648 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
649 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
650 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
651
652 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
653 data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
654 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
655 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
656 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
657 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
658 | UVD_CGC_CTRL__SYS_MODE_MASK
659 | UVD_CGC_CTRL__UDEC_MODE_MASK
660 | UVD_CGC_CTRL__MPEG2_MODE_MASK
661 | UVD_CGC_CTRL__REGS_MODE_MASK
662 | UVD_CGC_CTRL__RBC_MODE_MASK
663 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
664 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
665 | UVD_CGC_CTRL__IDCT_MODE_MASK
666 | UVD_CGC_CTRL__MPRD_MODE_MASK
667 | UVD_CGC_CTRL__MPC_MODE_MASK
668 | UVD_CGC_CTRL__LBSI_MODE_MASK
669 | UVD_CGC_CTRL__LRBBM_MODE_MASK
670 | UVD_CGC_CTRL__WCB_MODE_MASK
671 | UVD_CGC_CTRL__VCPU_MODE_MASK
672 | UVD_CGC_CTRL__SCPU_MODE_MASK);
673 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
674
675 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
676 data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
677 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
678 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
679 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
680 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
681 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
682 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
683 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
684 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
685 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
686 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
687 }
688
689 static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
690 uint8_t sram_sel)
691 {
692 struct amdgpu_device *adev = vinst->adev;
693 uint32_t reg_data = 0;
694
695 /* disable JPEG CGC */
696 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
697 reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
698 else
699 reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
700 reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
701 reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
702 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
703
704 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
705
706 /* enable sw clock gating control */
707 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
708 reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
709 else
710 reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
711 reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
712 reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
713 reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
714 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
715 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
716 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
717 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
718 UVD_CGC_CTRL__SYS_MODE_MASK |
719 UVD_CGC_CTRL__UDEC_MODE_MASK |
720 UVD_CGC_CTRL__MPEG2_MODE_MASK |
721 UVD_CGC_CTRL__REGS_MODE_MASK |
722 UVD_CGC_CTRL__RBC_MODE_MASK |
723 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
724 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
725 UVD_CGC_CTRL__IDCT_MODE_MASK |
726 UVD_CGC_CTRL__MPRD_MODE_MASK |
727 UVD_CGC_CTRL__MPC_MODE_MASK |
728 UVD_CGC_CTRL__LBSI_MODE_MASK |
729 UVD_CGC_CTRL__LRBBM_MODE_MASK |
730 UVD_CGC_CTRL__WCB_MODE_MASK |
731 UVD_CGC_CTRL__VCPU_MODE_MASK |
732 UVD_CGC_CTRL__SCPU_MODE_MASK);
733 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
734
735 /* turn off clock gating */
736 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
737
738 /* turn on SUVD clock gating */
739 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);
740
741 /* turn on sw mode in UVD_SUVD_CGC_CTRL */
742 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
743 }
744
745 static void vcn_1_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
746 {
747 struct amdgpu_device *adev = vinst->adev;
748 uint32_t data = 0;
749
750 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
751 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
752 | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
753 | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
754 | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
755 | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
756 | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
757 | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
758 | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
759 | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
760 | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
761 | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
762
763 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
764 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF);
765 } else {
766 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
767 | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
768 | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
769 | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
770 | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
771 | 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
772 | 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
773 | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
774 | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
775 | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
776 | 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
777 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
778 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF);
779 }
780
781 /* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS, UVDU_PWR_STATUS are 0 (power on) */
782
783 data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
784 data &= ~0x103;
785 if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
786 data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;
787
788 WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
789 }
790
791 static void vcn_1_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
792 {
793 struct amdgpu_device *adev = vinst->adev;
794 uint32_t data = 0;
795
796 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
797 /* Before power off, this indicator has to be turned on */
798 data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
799 data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
800 data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
801 WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
802
803
804 data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
805 | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
806 | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
807 | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
808 | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
809 | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
810 | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
811 | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
812 | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
813 | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
814 | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
815
816 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
817
818 data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
819 | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
820 | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
821 | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
822 | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
823 | 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
824 | 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
825 | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
826 | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
827 | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
828 | 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
829 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF);
830 }
831 }
832
833 /**
834 * vcn_v1_0_start_spg_mode - start VCN block
835 *
836 * @vinst: VCN instance
837 *
838 * Setup and start the VCN block
839 */
840 static int vcn_v1_0_start_spg_mode(struct amdgpu_vcn_inst *vinst)
841 {
842 struct amdgpu_device *adev = vinst->adev;
843 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
844 uint32_t rb_bufsz, tmp;
845 uint32_t lmi_swap_cntl;
846 int i, j, r;
847
848 /* disable byte swapping */
849 lmi_swap_cntl = 0;
850
851 vcn_1_0_disable_static_power_gating(vinst);
852
853 tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
854 WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
855
856 /* disable clock gating */
857 vcn_v1_0_disable_clock_gating(vinst);
858
859 /* disable interrupt */
860 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
861 ~UVD_MASTINT_EN__VCPU_EN_MASK);
862
863 /* initialize VCN memory controller */
864 tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
865 WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
866 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
867 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
868 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
869 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
870
871 #ifdef __BIG_ENDIAN
872 /* swap (8 in 32) RB and IB */
873 lmi_swap_cntl = 0xa;
874 #endif
875 WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
876
877 tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
878 tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
879 tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
880 WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);
881
882 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
883 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
884 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
885 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
886 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
887
888 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
889 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
890 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
891 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
892 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
893
894 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
895 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
896 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
897 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
898
899 vcn_v1_0_mc_resume_spg_mode(vinst);
900
901 WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
902 WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
903 RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);
904
905 /* enable VCPU clock */
906 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
907
908 /* boot up the VCPU */
909 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
910 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
911
912 /* enable UMC */
913 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
914 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
915
916 tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
917 tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
918 tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
919 WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);
920
921 for (i = 0; i < 10; ++i) {
922 uint32_t status;
923
924 for (j = 0; j < 100; ++j) {
925 status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
926 if (status & UVD_STATUS__IDLE)
927 break;
928 mdelay(10);
929 }
930 r = 0;
931 if (status & UVD_STATUS__IDLE)
932 break;
933
934 DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
935 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
936 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
937 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
938 mdelay(10);
939 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
940 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
941 mdelay(10);
942 r = -1;
943 }
944
945 if (r) {
946 DRM_ERROR("VCN decode not responding, giving up!!!\n");
947 return r;
948 }
949 /* enable master interrupt */
950 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
951 UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);
952
953 /* enable system interrupt for JRBC, TODO: move to set interrupt */
954 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
955 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
956 ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);
957
958 /* clear the busy bit of UVD_STATUS */
959 tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
960 WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
961
962 /* force RBC into idle state */
963 rb_bufsz = order_base_2(ring->ring_size);
964 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
965 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
966 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
967 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
968 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
969 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
970
971 /* set the write pointer delay */
972 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
973
974 /* set the wb address */
975 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
976 (upper_32_bits(ring->gpu_addr) >> 2));
977
978 /* program the RB_BASE for ring buffer */
979 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
980 lower_32_bits(ring->gpu_addr));
981 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
982 upper_32_bits(ring->gpu_addr));
983
984 /* Initialize the ring buffer's read and write pointers */
985 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
986
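	/* clear UVD_SCRATCH2, which vcn_v1_0_dec_ring_set_wptr() uses to mirror
	 * the decode ring write pointer when dynamic power gating is active
	 */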
987 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
988
989 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
990 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
991 lower_32_bits(ring->wptr));
992
993 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
994 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
995
996 ring = &adev->vcn.inst->ring_enc[0];
997 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
998 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
999 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
1000 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1001 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
1002
1003 ring = &adev->vcn.inst->ring_enc[1];
1004 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1005 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1006 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1007 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1008 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
1009
1010 jpeg_v1_0_start(adev, 0);
1011
1012 /* Keeping one read-back to ensure all register writes are done,
1013 * otherwise it may introduce race conditions.
1014 */
1015 RREG32_SOC15(UVD, 0, mmUVD_STATUS);
1016
1017 return 0;
1018 }
1019
1020 static int vcn_v1_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst)
1021 {
1022 struct amdgpu_device *adev = vinst->adev;
1023 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
1024 uint32_t rb_bufsz, tmp;
1025 uint32_t lmi_swap_cntl;
1026
1027 /* disable byte swapping */
1028 lmi_swap_cntl = 0;
1029
1030 vcn_1_0_enable_static_power_gating(vinst);
1031
1032 /* enable dynamic power gating mode */
1033 tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
1034 tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
1035 tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
1036 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
1037
1038 /* enable clock gating */
1039 vcn_v1_0_clock_gating_dpg_mode(vinst, 0);
1040
1041 /* enable VCPU clock */
1042 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
1043 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
1044 tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
1045 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);
1046
1047 /* disable interrupt */
1048 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
1049 0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
1050
1051 /* initialize VCN memory controller */
1052 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
1053 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
1054 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1055 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1056 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
1057 UVD_LMI_CTRL__REQ_MODE_MASK |
1058 UVD_LMI_CTRL__CRC_RESET_MASK |
1059 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1060 0x00100000L, 0xFFFFFFFF, 0);
1061
1062 #ifdef __BIG_ENDIAN
1063 /* swap (8 in 32) RB and IB */
1064 lmi_swap_cntl = 0xa;
1065 #endif
1066 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);
1067
1068 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_CNTL,
1069 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);
1070
1071 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXA0,
1072 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
1073 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
1074 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
1075 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);
1076
1077 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXB0,
1078 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
1079 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
1080 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
1081 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);
1082
1083 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUX,
1084 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
1085 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
1086 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);
1087
1088 vcn_v1_0_mc_resume_dpg_mode(vinst);
1089
1090 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
1091 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
1092
1093 /* boot up the VCPU */
1094 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);
1095
1096 /* enable UMC */
1097 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL2,
1098 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
1099 0xFFFFFFFF, 0);
1100
1101 /* enable master interrupt */
1102 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
1103 UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
1104
1105 vcn_v1_0_clock_gating_dpg_mode(vinst, 1);
1106 /* setup mmUVD_LMI_CTRL */
1107 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
1108 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
1109 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1110 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1111 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
1112 UVD_LMI_CTRL__REQ_MODE_MASK |
1113 UVD_LMI_CTRL__CRC_RESET_MASK |
1114 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1115 0x00100000L, 0xFFFFFFFF, 1);
1116
1117 tmp = adev->gfx.config.gb_addr_config;
1118 /* setup VCN global tiling registers */
1119 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
1120 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
1121
1122 /* enable System Interrupt for JRBC */
1123 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SYS_INT_EN,
1124 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);
1125
1126 /* force RBC into idle state */
1127 rb_bufsz = order_base_2(ring->ring_size);
1128 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1129 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1130 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1131 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1132 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1133 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
1134
1135 /* set the write pointer delay */
1136 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
1137
1138 /* set the wb address */
1139 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
1140 (upper_32_bits(ring->gpu_addr) >> 2));
1141
1142 /* program the RB_BASE for ring buffer */
1143 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1144 lower_32_bits(ring->gpu_addr));
1145 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1146 upper_32_bits(ring->gpu_addr));
1147
1148 /* Initialize the ring buffer's read and write pointers */
1149 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
1150
1151 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
1152
1153 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1154 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1155 lower_32_bits(ring->wptr));
1156
1157 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
1158 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1159
1160 jpeg_v1_0_start(adev, 1);
1161
1162 /* Keeping one read-back to ensure all register writes are done,
1163 * otherwise it may introduce race conditions.
1164 */
1165 RREG32_SOC15(UVD, 0, mmUVD_STATUS);
1166
1167 return 0;
1168 }
1169
1170 static int vcn_v1_0_start(struct amdgpu_vcn_inst *vinst)
1171 {
1172 struct amdgpu_device *adev = vinst->adev;
1173
1174 return (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ?
1175 vcn_v1_0_start_dpg_mode(vinst) : vcn_v1_0_start_spg_mode(vinst);
1176 }
1177
1178 /**
1179 * vcn_v1_0_stop_spg_mode - stop VCN block
1180 *
1181 * @vinst: VCN instance
1182 *
1183 * stop the VCN block
1184 */
1185 static int vcn_v1_0_stop_spg_mode(struct amdgpu_vcn_inst *vinst)
1186 {
1187 struct amdgpu_device *adev = vinst->adev;
1188 int tmp;
1189
1190 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
1191
1192 tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1193 UVD_LMI_STATUS__READ_CLEAN_MASK |
1194 UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1195 UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1196 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
1197
1198 /* stall UMC channel */
1199 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
1200 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1201 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1202
1203 tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
1204 UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1205 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
1206
1207 /* disable VCPU clock */
1208 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
1209 ~UVD_VCPU_CNTL__CLK_EN_MASK);
1210
1211 /* reset LMI UMC/LMI */
1212 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1213 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
1214 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
1215
1216 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1217 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
1218 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
1219
1220 /* put VCPU into reset */
1221 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1222 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1223 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1224
1225 WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
1226
1227 vcn_v1_0_enable_clock_gating(vinst);
1228 vcn_1_0_enable_static_power_gating(vinst);
1229
1230 /* Keeping one read-back to ensure all register writes are done,
1231 * otherwise it may introduce race conditions.
1232 */
1233 RREG32_SOC15(UVD, 0, mmUVD_STATUS);
1234
1235 return 0;
1236 }
1237
1238 static int vcn_v1_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
1239 {
1240 struct amdgpu_device *adev = vinst->adev;
1241 uint32_t tmp;
1242
1243 /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
1244 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1245 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1246 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1247
1248 /* wait for read ptr to be equal to write ptr */
1249 tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
1250 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1251
1252 tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
1253 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
1254
1255 tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
1256 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF);
1257
1258 tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
1259 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
1260
1261 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1262 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1263 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1264
1265 /* disable dynamic power gating mode */
1266 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
1267 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1268
1269 /* Keeping one read-back to ensure all register writes are done,
1270 * otherwise it may introduce race conditions.
1271 */
1272 RREG32_SOC15(UVD, 0, mmUVD_STATUS);
1273
1274 return 0;
1275 }
1276
1277 static int vcn_v1_0_stop(struct amdgpu_vcn_inst *vinst)
1278 {
1279 struct amdgpu_device *adev = vinst->adev;
1280 int r;
1281
1282 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1283 r = vcn_v1_0_stop_dpg_mode(vinst);
1284 else
1285 r = vcn_v1_0_stop_spg_mode(vinst);
1286
1287 return r;
1288 }
1289
1290 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
1291 struct dpg_pause_state *new_state)
1292 {
1293 struct amdgpu_device *adev = vinst->adev;
1294 int inst_idx = vinst->inst;
1295 int ret_code;
1296 uint32_t reg_data = 0;
1297 uint32_t reg_data2 = 0;
1298 struct amdgpu_ring *ring;
1299
1300 /* pause/unpause if state is changed */
1301 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1302 DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
1303 adev->vcn.inst[inst_idx].pause_state.fw_based,
1304 adev->vcn.inst[inst_idx].pause_state.jpeg,
1305 new_state->fw_based, new_state->jpeg);
1306
1307 reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
1308 (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1309
1310 if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1311 ret_code = 0;
1312
1313 if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
1314 ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1315 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1316 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1317
1318 if (!ret_code) {
1319 /* pause DPG non-jpeg */
1320 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1321 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1322 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
1323 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1324 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1325
1326 /* Restore */
1327 ring = &adev->vcn.inst->ring_enc[0];
1328 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
1329 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1330 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
1331 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1332 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1333
1334 ring = &adev->vcn.inst->ring_enc[1];
1335 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1336 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1337 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
1338 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1339 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1340
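	/* restore the decode ring write pointer from the UVD_SCRATCH2 mirror */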
1341 ring = &adev->vcn.inst->ring_dec;
1342 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1343 RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
1344 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1345 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
1346 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1347 }
1348 } else {
1349 /* unpause dpg non-jpeg, no need to wait */
1350 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1351 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1352 }
1353 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1354 }
1355
1356 /* pause/unpause if state is changed */
1357 if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
1358 DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
1359 adev->vcn.inst[inst_idx].pause_state.fw_based,
1360 adev->vcn.inst[inst_idx].pause_state.jpeg,
1361 new_state->fw_based, new_state->jpeg);
1362
1363 reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
1364 (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
1365
1366 if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
1367 ret_code = 0;
1368
1369 if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
1370 ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1371 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1372 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1373
1374 if (!ret_code) {
1375 /* Make sure JRBC snoop is disabled before sending the pause */
1376 reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
1377 reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
1378 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);
1379
1380 /* pause DPG jpeg */
1381 reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
1382 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1383 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
1384 UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
1385 UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
1386
1387 /* Restore */
1388 ring = adev->jpeg.inst->ring_dec;
1389 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
1390 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
1391 UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
1392 UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
1393 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
1394 lower_32_bits(ring->gpu_addr));
1395 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
1396 upper_32_bits(ring->gpu_addr));
1397 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
1398 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
1399 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
1400 UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
1401
1402 ring = &adev->vcn.inst->ring_dec;
1403 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1404 RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
1405 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1406 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
1407 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1408 }
1409 } else {
1410 /* unpause dpg jpeg, no need to wait */
1411 reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
1412 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1413 }
1414 adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
1415 }
1416
1417 return 0;
1418 }
1419
1420 static bool vcn_v1_0_is_idle(struct amdgpu_ip_block *ip_block)
1421 {
1422 struct amdgpu_device *adev = ip_block->adev;
1423
1424 return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
1425 }
1426
1427 static int vcn_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
1428 {
1429 struct amdgpu_device *adev = ip_block->adev;
1430 int ret;
1431
1432 ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
1433 UVD_STATUS__IDLE);
1434
1435 return ret;
1436 }
1437
1438 static int vcn_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1439 enum amd_clockgating_state state)
1440 {
1441 struct amdgpu_device *adev = ip_block->adev;
1442 struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
1443 bool enable = (state == AMD_CG_STATE_GATE);
1444
1445 if (enable) {
1446 /* wait for STATUS to clear */
1447 if (!vcn_v1_0_is_idle(ip_block))
1448 return -EBUSY;
1449 vcn_v1_0_enable_clock_gating(vinst);
1450 } else {
1451 /* disable HW gating and enable Sw gating */
1452 vcn_v1_0_disable_clock_gating(vinst);
1453 }
1454 return 0;
1455 }
1456
1457 /**
1458 * vcn_v1_0_dec_ring_get_rptr - get read pointer
1459 *
1460 * @ring: amdgpu_ring pointer
1461 *
1462 * Returns the current hardware read pointer
1463 */
1464 static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
1465 {
1466 struct amdgpu_device *adev = ring->adev;
1467
1468 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1469 }
1470
1471 /**
1472 * vcn_v1_0_dec_ring_get_wptr - get write pointer
1473 *
1474 * @ring: amdgpu_ring pointer
1475 *
1476 * Returns the current hardware write pointer
1477 */
1478 static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
1479 {
1480 struct amdgpu_device *adev = ring->adev;
1481
1482 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
1483 }
1484
1485 /**
1486 * vcn_v1_0_dec_ring_set_wptr - set write pointer
1487 *
1488 * @ring: amdgpu_ring pointer
1489 *
1490 * Commits the write pointer to the hardware
1491 */
1492 static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
1493 {
1494 struct amdgpu_device *adev = ring->adev;
1495
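	/* in DPG mode, also mirror the write pointer in UVD_SCRATCH2; bit 31 is
	 * masked off again when the value is restored after a pause
	 */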
1496 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1497 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
1498 lower_32_bits(ring->wptr) | 0x80000000);
1499
1500 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1501 }
1502
1503 /**
1504 * vcn_v1_0_dec_ring_insert_start - insert a start command
1505 *
1506 * @ring: amdgpu_ring pointer
1507 *
1508 * Write a start command to the ring.
1509 */
1510 static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
1511 {
1512 struct amdgpu_device *adev = ring->adev;
1513
1514 amdgpu_ring_write(ring,
1515 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1516 amdgpu_ring_write(ring, 0);
1517 amdgpu_ring_write(ring,
1518 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1519 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
1520 }
1521
1522 /**
1523  * vcn_v1_0_dec_ring_insert_end - insert an end command
1524 *
1525 * @ring: amdgpu_ring pointer
1526 *
1527  * Write an end command to the ring.
1528 */
1529 static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
1530 {
1531 struct amdgpu_device *adev = ring->adev;
1532
1533 amdgpu_ring_write(ring,
1534 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1535 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
1536 }
1537
1538 /**
1539  * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
1540 *
1541 * @ring: amdgpu_ring pointer
1542  * @addr: GPU address to write the fence value to
1543 * @seq: sequence number
1544 * @flags: fence related flags
1545 *
1546 * Write a fence and a trap command to the ring.
1547 */
1548 static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1549 unsigned flags)
1550 {
1551 struct amdgpu_device *adev = ring->adev;
1552
1553 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1554
1555 amdgpu_ring_write(ring,
1556 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
1557 amdgpu_ring_write(ring, seq);
1558 amdgpu_ring_write(ring,
1559 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1560 amdgpu_ring_write(ring, addr & 0xffffffff);
1561 amdgpu_ring_write(ring,
1562 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1563 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1564 amdgpu_ring_write(ring,
1565 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1566 amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
1567
1568 amdgpu_ring_write(ring,
1569 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1570 amdgpu_ring_write(ring, 0);
1571 amdgpu_ring_write(ring,
1572 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1573 amdgpu_ring_write(ring, 0);
1574 amdgpu_ring_write(ring,
1575 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1576 amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
1577 }
1578
1579 /**
1580 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
1581 *
1582 * @ring: amdgpu_ring pointer
1583 * @job: job to retrieve vmid from
1584 * @ib: indirect buffer to execute
1585 * @flags: unused
1586 *
1587 * Write ring commands to execute the indirect buffer
1588 */
1589 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
1590 struct amdgpu_job *job,
1591 struct amdgpu_ib *ib,
1592 uint32_t flags)
1593 {
1594 struct amdgpu_device *adev = ring->adev;
1595 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1596
1597 amdgpu_ring_write(ring,
1598 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
1599 amdgpu_ring_write(ring, vmid);
1600
1601 amdgpu_ring_write(ring,
1602 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1603 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1604 amdgpu_ring_write(ring,
1605 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1606 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1607 amdgpu_ring_write(ring,
1608 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
1609 amdgpu_ring_write(ring, ib->length_dw);
1610 }
1611
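/**
 * vcn_v1_0_dec_ring_emit_reg_wait - emit a register wait packet
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: expected value
 * @mask: bits of the register to compare
 *
 * Emit decode ring commands that wait for a register, masked by @mask,
 * to match @val
 */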
1612 static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
1613 uint32_t reg, uint32_t val,
1614 uint32_t mask)
1615 {
1616 struct amdgpu_device *adev = ring->adev;
1617
1618 amdgpu_ring_write(ring,
1619 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1620 amdgpu_ring_write(ring, reg << 2);
1621 amdgpu_ring_write(ring,
1622 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1623 amdgpu_ring_write(ring, val);
1624 amdgpu_ring_write(ring,
1625 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
1626 amdgpu_ring_write(ring, mask);
1627 amdgpu_ring_write(ring,
1628 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1629 amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
1630 }
1631
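/**
 * vcn_v1_0_dec_ring_emit_vm_flush - emit a VM page table flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID to flush
 * @pd_addr: page directory address
 *
 * Flush the GPU TLB for @vmid and wait until the new page directory
 * base has been written to the VM hub registers
 */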
1632 static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
1633 unsigned vmid, uint64_t pd_addr)
1634 {
1635 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
1636 uint32_t data0, data1, mask;
1637
1638 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1639
1640 /* wait for register write */
1641 data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1642 data1 = lower_32_bits(pd_addr);
1643 mask = 0xffffffff;
1644 vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
1645 }
1646
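/**
 * vcn_v1_0_dec_ring_emit_wreg - emit a register write
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit decode ring commands that write @val to @reg
 */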
1647 static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
1648 uint32_t reg, uint32_t val)
1649 {
1650 struct amdgpu_device *adev = ring->adev;
1651
1652 amdgpu_ring_write(ring,
1653 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1654 amdgpu_ring_write(ring, reg << 2);
1655 amdgpu_ring_write(ring,
1656 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1657 amdgpu_ring_write(ring, val);
1658 amdgpu_ring_write(ring,
1659 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1660 amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
1661 }
1662
1663 /**
1664 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
1665 *
1666 * @ring: amdgpu_ring pointer
1667 *
1668 * Returns the current hardware enc read pointer
1669 */
1670 static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
1671 {
1672 struct amdgpu_device *adev = ring->adev;
1673
1674 if (ring == &adev->vcn.inst->ring_enc[0])
1675 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
1676 else
1677 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
1678 }
1679
1680 /**
1681 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
1682 *
1683 * @ring: amdgpu_ring pointer
1684 *
1685 * Returns the current hardware enc write pointer
1686 */
1687 static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
1688 {
1689 struct amdgpu_device *adev = ring->adev;
1690
1691 if (ring == &adev->vcn.inst->ring_enc[0])
1692 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
1693 else
1694 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
1695 }
1696
1697 /**
1698 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
1699 *
1700 * @ring: amdgpu_ring pointer
1701 *
1702 * Commits the enc write pointer to the hardware
1703 */
1704 static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
1705 {
1706 struct amdgpu_device *adev = ring->adev;
1707
1708 if (ring == &adev->vcn.inst->ring_enc[0])
1709 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
1710 lower_32_bits(ring->wptr));
1711 else
1712 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
1713 lower_32_bits(ring->wptr));
1714 }
1715
1716 /**
1717 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
1718 *
1719 * @ring: amdgpu_ring pointer
1720  * @addr: GPU address to write the fence value to
1721 * @seq: sequence number
1722 * @flags: fence related flags
1723 *
1724  * Write an enc fence and a trap command to the ring.
1725 */
1726 static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1727 u64 seq, unsigned flags)
1728 {
1729 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1730
1731 amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
1732 amdgpu_ring_write(ring, addr);
1733 amdgpu_ring_write(ring, upper_32_bits(addr));
1734 amdgpu_ring_write(ring, seq);
1735 amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
1736 }
1737
1738 static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1739 {
1740 amdgpu_ring_write(ring, VCN_ENC_CMD_END);
1741 }
1742
1743 /**
1744 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
1745 *
1746 * @ring: amdgpu_ring pointer
1747  * @job: job to retrieve vmid from
1748 * @ib: indirect buffer to execute
1749 * @flags: unused
1750 *
1751 * Write enc ring commands to execute the indirect buffer
1752 */
1753 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1754 struct amdgpu_job *job,
1755 struct amdgpu_ib *ib,
1756 uint32_t flags)
1757 {
1758 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1759
1760 amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
1761 amdgpu_ring_write(ring, vmid);
1762 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1763 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1764 amdgpu_ring_write(ring, ib->length_dw);
1765 }
1766
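/**
 * vcn_v1_0_enc_ring_emit_reg_wait - emit an enc register wait packet
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: expected value
 * @mask: bits of the register to compare
 *
 * Emit encode ring commands that wait for a register, masked by @mask,
 * to match @val
 */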
1767 static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1768 uint32_t reg, uint32_t val,
1769 uint32_t mask)
1770 {
1771 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
1772 amdgpu_ring_write(ring, reg << 2);
1773 amdgpu_ring_write(ring, mask);
1774 amdgpu_ring_write(ring, val);
1775 }
1776
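/**
 * vcn_v1_0_enc_ring_emit_vm_flush - emit an enc VM page table flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID to flush
 * @pd_addr: page directory address
 *
 * Flush the GPU TLB for @vmid and wait until the new page directory
 * base has been written to the VM hub registers
 */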
1777 static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1778 unsigned int vmid, uint64_t pd_addr)
1779 {
1780 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
1781
1782 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1783
1784 /* wait for reg writes */
1785 vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
1786 vmid * hub->ctx_addr_distance,
1787 lower_32_bits(pd_addr), 0xffffffff);
1788 }
1789
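/**
 * vcn_v1_0_enc_ring_emit_wreg - emit an enc register write
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit encode ring commands that write @val to @reg
 */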
1790 static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1791 uint32_t reg, uint32_t val)
1792 {
1793 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1794 amdgpu_ring_write(ring, reg << 2);
1795 amdgpu_ring_write(ring, val);
1796 }
1797
1798 static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
1799 struct amdgpu_irq_src *source,
1800 unsigned type,
1801 enum amdgpu_interrupt_state state)
1802 {
1803 return 0;
1804 }
1805
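/**
 * vcn_v1_0_process_interrupt - process a VCN interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Process fences on the decode and encode rings based on the
 * interrupt source ID
 */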
1806 static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
1807 struct amdgpu_irq_src *source,
1808 struct amdgpu_iv_entry *entry)
1809 {
1810 DRM_DEBUG("IH: VCN TRAP\n");
1811
1812 switch (entry->src_id) {
1813 case 124:
1814 amdgpu_fence_process(&adev->vcn.inst->ring_dec);
1815 break;
1816 case 119:
1817 amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
1818 break;
1819 case 120:
1820 amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
1821 break;
1822 default:
1823 DRM_ERROR("Unhandled interrupt: %d %d\n",
1824 entry->src_id, entry->src_data[0]);
1825 break;
1826 }
1827
1828 return 0;
1829 }
1830
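/**
 * vcn_v1_0_dec_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to pad with
 *
 * Pad the decode ring with UVD_NO_OP register writes; @count and the
 * write pointer are expected to stay dword-pair aligned
 */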
1831 static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1832 {
1833 struct amdgpu_device *adev = ring->adev;
1834 int i;
1835
1836 WARN_ON(ring->wptr % 2 || count % 2);
1837
1838 for (i = 0; i < count / 2; i++) {
1839 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
1840 amdgpu_ring_write(ring, 0);
1841 }
1842 }
1843
1844 static int vcn_v1_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
1845 enum amd_powergating_state state)
1846 {
1847 /* This doesn't actually powergate the VCN block.
1848 * That's done in the dpm code via the SMC. This
1849 * just re-inits the block as necessary. The actual
1850 * gating still happens in the dpm code. We should
1851 * revisit this when there is a cleaner line between
1852 	 * the SMC and the HW blocks.
1853 */
1854 int ret;
1855
1856 if (state == vinst->cur_state)
1857 return 0;
1858
1859 if (state == AMD_PG_STATE_GATE)
1860 ret = vcn_v1_0_stop(vinst);
1861 else
1862 ret = vcn_v1_0_start(vinst);
1863
1864 if (!ret)
1865 vinst->cur_state = state;
1866
1867 return ret;
1868 }
1869
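/**
 * vcn_v1_0_idle_work_handler - VCN idle work handler
 *
 * @work: delayed work item
 *
 * Count the fences still emitted on the decode, encode and JPEG rings,
 * update the DPG pause state accordingly, and power the block down once
 * everything is idle; otherwise reschedule the idle work
 */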
1870 static void vcn_v1_0_idle_work_handler(struct work_struct *work)
1871 {
1872 struct amdgpu_vcn_inst *vcn_inst =
1873 container_of(work, struct amdgpu_vcn_inst, idle_work.work);
1874 struct amdgpu_device *adev = vcn_inst->adev;
1875 unsigned int fences = 0, i;
1876
1877 for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
1878 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
1879
1880 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1881 struct dpg_pause_state new_state;
1882
1883 if (fences)
1884 new_state.fw_based = VCN_DPG_STATE__PAUSE;
1885 else
1886 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
1887
1888 if (amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec))
1889 new_state.jpeg = VCN_DPG_STATE__PAUSE;
1890 else
1891 new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
1892
1893 adev->vcn.inst->pause_dpg_mode(vcn_inst, &new_state);
1894 }
1895
1896 fences += amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec);
1897 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
1898
1899 if (fences == 0) {
1900 amdgpu_gfx_off_ctrl(adev, true);
1901 if (adev->pm.dpm_enabled)
1902 amdgpu_dpm_enable_vcn(adev, false, 0);
1903 else
1904 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
1905 AMD_PG_STATE_GATE);
1906 } else {
1907 schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
1908 }
1909 }
1910
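/**
 * vcn_v1_0_ring_begin_use - ring begin_use callback
 *
 * @ring: amdgpu_ring pointer
 *
 * Cancel the pending idle work, take the VCN1/JPEG1 workaround lock,
 * wait for the JPEG decode ring to drain and power the block back up
 * when no idle work was pending
 */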
1911 static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
1912 {
1913 struct amdgpu_device *adev = ring->adev;
1914 bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
1915
1916 mutex_lock(&adev->vcn.inst[0].vcn1_jpeg1_workaround);
1917
1918 if (amdgpu_fence_wait_empty(ring->adev->jpeg.inst->ring_dec))
1919 DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");
1920
1921 	vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
1923 }
1924
1925 void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
1926 {
1927 struct amdgpu_device *adev = ring->adev;
1928
1929 if (set_clocks) {
1930 amdgpu_gfx_off_ctrl(adev, false);
1931 if (adev->pm.dpm_enabled)
1932 amdgpu_dpm_enable_vcn(adev, true, 0);
1933 else
1934 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
1935 AMD_PG_STATE_UNGATE);
1936 }
1937
1938 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1939 struct dpg_pause_state new_state;
1940 unsigned int fences = 0, i;
1941
1942 for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
1943 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
1944
1945 if (fences)
1946 new_state.fw_based = VCN_DPG_STATE__PAUSE;
1947 else
1948 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
1949
1950 if (amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec))
1951 new_state.jpeg = VCN_DPG_STATE__PAUSE;
1952 else
1953 new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
1954
1955 if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
1956 new_state.fw_based = VCN_DPG_STATE__PAUSE;
1957 else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
1958 new_state.jpeg = VCN_DPG_STATE__PAUSE;
1959
1960 adev->vcn.inst->pause_dpg_mode(adev->vcn.inst, &new_state);
1961 }
1962 }
1963
1964 void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
1965 {
1966 schedule_delayed_work(&ring->adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
1967 mutex_unlock(&ring->adev->vcn.inst[0].vcn1_jpeg1_workaround);
1968 }
1969
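/**
 * vcn_v1_0_print_ip_state - print the captured VCN register state
 *
 * @ip_block: SoC15 IP block for this VCN instance
 * @p: drm_printer to print to
 *
 * Print the register values collected by vcn_v1_0_dump_ip_state for
 * every powered-on, non-harvested VCN instance
 */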
1970 static void vcn_v1_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
1971 {
1972 struct amdgpu_device *adev = ip_block->adev;
1973 int i, j;
1974 uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
1975 uint32_t inst_off, is_powered;
1976
1977 if (!adev->vcn.ip_dump)
1978 return;
1979
1980 drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
1981 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1982 if (adev->vcn.harvest_config & (1 << i)) {
1983 drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
1984 continue;
1985 }
1986
1987 inst_off = i * reg_count;
1988 is_powered = (adev->vcn.ip_dump[inst_off] &
1989 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
1990
1991 if (is_powered) {
1992 drm_printf(p, "\nActive Instance:VCN%d\n", i);
1993 for (j = 0; j < reg_count; j++)
1994 drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_1_0[j].reg_name,
1995 adev->vcn.ip_dump[inst_off + j]);
1996 } else {
1997 drm_printf(p, "\nInactive Instance:VCN%d\n", i);
1998 }
1999 }
2000 }
2001
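/**
 * vcn_v1_0_dump_ip_state - capture the VCN register state
 *
 * @ip_block: SoC15 IP block for this VCN instance
 *
 * Read the registers in vcn_reg_list_1_0 into the ip_dump buffer for
 * every non-harvested instance; only UVD_POWER_STATUS is read when the
 * instance is powered down
 */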
2002 static void vcn_v1_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
2003 {
2004 struct amdgpu_device *adev = ip_block->adev;
2005 int i, j;
2006 bool is_powered;
2007 uint32_t inst_off;
2008 uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
2009
2010 if (!adev->vcn.ip_dump)
2011 return;
2012
2013 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2014 if (adev->vcn.harvest_config & (1 << i))
2015 continue;
2016
2017 inst_off = i * reg_count;
2018 		/* mmUVD_POWER_STATUS is always readable and is the first element of the array */
2019 adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
2020 is_powered = (adev->vcn.ip_dump[inst_off] &
2021 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
2022
2023 if (is_powered)
2024 for (j = 1; j < reg_count; j++)
2025 adev->vcn.ip_dump[inst_off + j] =
2026 RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_1_0[j], i));
2027 }
2028 }
2029
2030 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
2031 .name = "vcn_v1_0",
2032 .early_init = vcn_v1_0_early_init,
2033 .sw_init = vcn_v1_0_sw_init,
2034 .sw_fini = vcn_v1_0_sw_fini,
2035 .hw_init = vcn_v1_0_hw_init,
2036 .hw_fini = vcn_v1_0_hw_fini,
2037 .suspend = vcn_v1_0_suspend,
2038 .resume = vcn_v1_0_resume,
2039 .is_idle = vcn_v1_0_is_idle,
2040 .wait_for_idle = vcn_v1_0_wait_for_idle,
2041 .set_clockgating_state = vcn_v1_0_set_clockgating_state,
2042 .set_powergating_state = vcn_set_powergating_state,
2043 .dump_ip_state = vcn_v1_0_dump_ip_state,
2044 .print_ip_state = vcn_v1_0_print_ip_state,
2045 };
2046
2047 /*
2048  * Due to a hardware issue, VCN can't handle a GTT TMZ buffer on
2049  * CHIP_RAVEN series ASICs. Move such a GTT TMZ buffer to the VRAM
2050  * domain before command submission as a workaround.
2051 */
2052 static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
2053 struct amdgpu_job *job,
2054 uint64_t addr)
2055 {
2056 struct ttm_operation_ctx ctx = { false, false };
2057 struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
2058 struct amdgpu_vm *vm = &fpriv->vm;
2059 struct amdgpu_bo_va_mapping *mapping;
2060 struct amdgpu_bo *bo;
2061 int r;
2062
2063 addr &= AMDGPU_GMC_HOLE_MASK;
2064 if (addr & 0x7) {
2065 DRM_ERROR("VCN messages must be 8 byte aligned!\n");
2066 return -EINVAL;
2067 }
2068
2069 mapping = amdgpu_vm_bo_lookup_mapping(vm, addr/AMDGPU_GPU_PAGE_SIZE);
2070 if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
2071 return -EINVAL;
2072
2073 bo = mapping->bo_va->base.bo;
2074 if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
2075 return 0;
2076
2077 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
2078 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2079 if (r) {
2080 DRM_ERROR("Failed to validate the VCN message BO (%d)!\n", r);
2081 return r;
2082 }
2083
2084 return r;
2085 }
2086
2087 static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
2088 struct amdgpu_job *job,
2089 struct amdgpu_ib *ib)
2090 {
2091 uint32_t msg_lo = 0, msg_hi = 0;
2092 int i, r;
2093
2094 if (!(ib->flags & AMDGPU_IB_FLAGS_SECURE))
2095 return 0;
2096
2097 for (i = 0; i < ib->length_dw; i += 2) {
2098 uint32_t reg = amdgpu_ib_get_value(ib, i);
2099 uint32_t val = amdgpu_ib_get_value(ib, i + 1);
2100
2101 if (reg == PACKET0(p->adev->vcn.inst[0].internal.data0, 0)) {
2102 msg_lo = val;
2103 } else if (reg == PACKET0(p->adev->vcn.inst[0].internal.data1, 0)) {
2104 msg_hi = val;
2105 } else if (reg == PACKET0(p->adev->vcn.inst[0].internal.cmd, 0)) {
2106 r = vcn_v1_0_validate_bo(p, job,
2107 ((u64)msg_hi) << 32 | msg_lo);
2108 if (r)
2109 return r;
2110 }
2111 }
2112
2113 return 0;
2114 }
2115
2116 static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
2117 .type = AMDGPU_RING_TYPE_VCN_DEC,
2118 .align_mask = 0xf,
2119 .support_64bit_ptrs = false,
2120 .no_user_fence = true,
2121 .secure_submission_supported = true,
2122 .get_rptr = vcn_v1_0_dec_ring_get_rptr,
2123 .get_wptr = vcn_v1_0_dec_ring_get_wptr,
2124 .set_wptr = vcn_v1_0_dec_ring_set_wptr,
2125 .patch_cs_in_place = vcn_v1_0_ring_patch_cs_in_place,
2126 .emit_frame_size =
2127 6 + 6 + /* hdp invalidate / flush */
2128 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
2129 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
2130 8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
2131 14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
2132 6,
2133 .emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
2134 .emit_ib = vcn_v1_0_dec_ring_emit_ib,
2135 .emit_fence = vcn_v1_0_dec_ring_emit_fence,
2136 .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
2137 .test_ring = amdgpu_vcn_dec_ring_test_ring,
2138 .test_ib = amdgpu_vcn_dec_ring_test_ib,
2139 .insert_nop = vcn_v1_0_dec_ring_insert_nop,
2140 .insert_start = vcn_v1_0_dec_ring_insert_start,
2141 .insert_end = vcn_v1_0_dec_ring_insert_end,
2142 .pad_ib = amdgpu_ring_generic_pad_ib,
2143 .begin_use = vcn_v1_0_ring_begin_use,
2144 .end_use = vcn_v1_0_ring_end_use,
2145 .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
2146 .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
2147 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2148 };
2149
2150 static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
2151 .type = AMDGPU_RING_TYPE_VCN_ENC,
2152 .align_mask = 0x3f,
2153 .nop = VCN_ENC_CMD_NO_OP,
2154 .support_64bit_ptrs = false,
2155 .no_user_fence = true,
2156 .get_rptr = vcn_v1_0_enc_ring_get_rptr,
2157 .get_wptr = vcn_v1_0_enc_ring_get_wptr,
2158 .set_wptr = vcn_v1_0_enc_ring_set_wptr,
2159 .emit_frame_size =
2160 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2161 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
2162 4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
2163 5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
2164 1, /* vcn_v1_0_enc_ring_insert_end */
2165 .emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
2166 .emit_ib = vcn_v1_0_enc_ring_emit_ib,
2167 .emit_fence = vcn_v1_0_enc_ring_emit_fence,
2168 .emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
2169 .test_ring = amdgpu_vcn_enc_ring_test_ring,
2170 .test_ib = amdgpu_vcn_enc_ring_test_ib,
2171 .insert_nop = amdgpu_ring_insert_nop,
2172 .insert_end = vcn_v1_0_enc_ring_insert_end,
2173 .pad_ib = amdgpu_ring_generic_pad_ib,
2174 .begin_use = vcn_v1_0_ring_begin_use,
2175 .end_use = vcn_v1_0_ring_end_use,
2176 .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
2177 .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
2178 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2179 };
2180
2181 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
2182 {
2183 adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
2184 }
2185
2186 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
2187 {
2188 int i;
2189
2190 for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
2191 adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
2192 }
2193
2194 static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
2195 .set = vcn_v1_0_set_interrupt_state,
2196 .process = vcn_v1_0_process_interrupt,
2197 };
2198
2199 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
2200 {
2201 adev->vcn.inst->irq.num_types = adev->vcn.inst[0].num_enc_rings + 2;
2202 adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
2203 }
2204
2205 const struct amdgpu_ip_block_version vcn_v1_0_ip_block = {
2206 .type = AMD_IP_BLOCK_TYPE_VCN,
2207 .major = 1,
2208 .minor = 0,
2209 .rev = 0,
2210 .funcs = &vcn_v1_0_ip_funcs,
2211 };
2212