/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_cs.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
#include "jpeg_v1_0.h"
#include "vcn_v1_0.h"

#define mmUVD_RBC_XX_IB_REG_CHECK_1_0			0x05ab
#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX		1
#define mmUVD_REG_XX_MASK_1_0				0x05ac
#define mmUVD_REG_XX_MASK_1_0_BASE_IDX			1

static const struct amdgpu_hwip_reg_entry vcn_reg_list_1_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
};
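/*
 * The registers above are the ones covered by the VCN IP dump support:
 * vcn_v1_0_sw_init() below sizes the adev->vcn.ip_dump buffer from
 * ARRAY_SIZE(vcn_reg_list_1_0), so their values can be snapshotted for
 * debugging after a hang.
 */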
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
				   int inst_idx, struct dpg_pause_state *new_state);

static void vcn_v1_0_idle_work_handler(struct work_struct *work);
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);

/**
 * vcn_v1_0_early_init - set function pointers and load microcode
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_enc_rings = 2;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	jpeg_v1_0_early_init(handle);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
	uint32_t *ptr;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	/* Override the work func */
	adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;
	ring->vm_hub = AMDGPU_MMHUB0(0);
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = adev->vcn.inst->external.nop =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

		ring = &adev->vcn.inst->ring_enc[i];
		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;

	if (amdgpu_vcnfw_log) {
		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;

		fw_shared->present_flag_0 = 0;
		amdgpu_vcn_fwlog_init(adev->vcn.inst);
	}

	r = jpeg_v1_0_sw_init(handle);

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}
	return r;
}

/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	jpeg_v1_0_sw_fini(handle);

	r = amdgpu_vcn_sw_fini(adev);

	kfree(adev->vcn.ip_dump);

	return r;
}

/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	ring = adev->jpeg.inst->ring_dec;
	r = amdgpu_ring_test_helper(ring);

	return r;
}

/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
		vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return 0;
}

/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool idle_work_unexecuted;

	idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
	if (idle_work_unexecuted) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
	}

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}
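/*
 * Layout assumed by the two mc_resume variants below: the VCPU sees three
 * cache windows backed by the instance's VCN BO. Window 0 holds the firmware
 * image (or points at the PSP-loaded TMR region), window 1 the stack and
 * window 2 the context, which is why the offsets below simply walk
 * size -> size + AMDGPU_VCN_STACK_SIZE.
 */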
/**
 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
}
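/*
 * DPG-mode variant: the same cache windows and tiling registers are set up,
 * but every write goes through the WREG32_SOC15_DPG_MODE_1_0() helper (with
 * mask and sram_sel arguments) rather than a direct register write, since in
 * dynamic power gating mode the block appears to be programmed indirectly
 * through the UVD_DPG_LMA_* interface listed in vcn_reg_list_1_0 above.
 */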
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
			0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
			0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
			0xFFFFFFFF, 0);
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		offset = size;
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
	}

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);

	/* cache window 1: stack */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
		0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
		0xFFFFFFFF, 0);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
		0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
		0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
		0xFFFFFFFF, 0);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
}
/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
{
	uint32_t reg_data = 0;

	/* disable JPEG CGC */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK |
		UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
}
static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS, UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF);
	}
}
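/*
 * VCN 1.0 has two bring-up paths, selected in vcn_v1_0_start() further down:
 * vcn_v1_0_start_spg_mode() for static power gating, where the whole block is
 * powered up and registers are written directly, and vcn_v1_0_start_dpg_mode()
 * when AMD_PG_SUPPORT_VCN_DPG is set, where programming goes through the DPG
 * helpers and the pause/unpause state machine in vcn_v1_0_pause_dpg_mode().
 */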
/**
 * vcn_v1_0_start_spg_mode - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_disable_static_power_gating(adev);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* initialize VCN memory controller */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v1_0_mc_resume_spg_mode(adev);

	WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
		RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);

	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* boot up the VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & UVD_STATUS__IDLE)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & UVD_STATUS__IDLE)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable system interrupt for JRBC, TODO: move to set interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
		~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);

	/* clear the busy bit of UVD_STATUS */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
		~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	jpeg_v1_0_start(adev, 0);

	return 0;
}
static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	/* enable clock gating */
	vcn_v1_0_clock_gating_dpg_mode(adev, 0);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);

	/* disable interrupt */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
		0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	/* initialize VCN memory controller */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 0);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_CNTL,
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);

	vcn_v1_0_mc_resume_dpg_mode(adev);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);

	/* boot up the VCPU */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);

	/* enable UMC */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL2,
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
		0xFFFFFFFF, 0);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
		UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	vcn_v1_0_clock_gating_dpg_mode(adev, 1);
	/* setup mmUVD_LMI_CTRL */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 1);

	tmp = adev->gfx.config.gb_addr_config;
	/* setup VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);

	/* enable System Interrupt for JRBC */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SYS_INT_EN,
		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
		~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	jpeg_v1_0_start(adev, 1);

	return 0;
}

static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	return (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ?
		vcn_v1_0_start_dpg_mode(adev) : vcn_v1_0_start_spg_mode(adev);
}
/**
 * vcn_v1_0_stop_spg_mode - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
{
	int tmp;

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);

	/* stall UMC channel */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
		UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* reset LMI UMC/LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* put VCPU into reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(adev);
	vcn_1_0_enable_static_power_gating(adev);
	return 0;
}

static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
		UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
		UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		r = vcn_v1_0_stop_dpg_mode(adev);
	else
		r = vcn_v1_0_stop_spg_mode(adev);

	return r;
}
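/*
 * DPG pause handling: a pause is requested by setting the NJ (non-JPEG) or
 * JPEG request bit in UVD_DPG_PAUSE and waiting for the matching ACK bit.
 * Once acknowledged, the encode (or JPEG) ring registers and the decode
 * write pointer saved in UVD_SCRATCH2 are restored before new submissions.
 * Unpausing only clears the request bit and does not wait.
 */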
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
				   int inst_idx, struct dpg_pause_state *new_state)
{
	int ret_code;
	uint32_t reg_data = 0;
	uint32_t reg_data2 = 0;
	struct amdgpu_ring *ring;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.inst[inst_idx].pause_state.fw_based,
			adev->vcn.inst[inst_idx].pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG non-jpeg */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Restore */
				ring = &adev->vcn.inst->ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg non-jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.inst[inst_idx].pause_state.fw_based,
			adev->vcn.inst[inst_idx].pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

		if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* Make sure JRBC snoop is disabled before sending the pause */
				reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
				reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);

				/* pause DPG jpeg */
				reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

				/* Restore */
				ring = adev->jpeg.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
					UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
					UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
					lower_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
					upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
					UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);

				ring = &adev->vcn.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
	}

	return 0;
}
static bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

static int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE);

	return ret;
}

static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v1_0_is_idle(handle))
			return -EBUSY;
		vcn_v1_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable Sw gating */
		vcn_v1_0_disable_clock_gating(adev);
	}
	return 0;
}
/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}

/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
					 unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}

static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}
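/*
 * The decode ring helpers above drive the hardware with PACKET0 register
 * writes to the UVD_GPCOM_VCPU_DATA0/DATA1/CMD mailbox; the encode ring
 * helpers below emit the VCN_ENC_CMD_* opcodes directly, so they need no
 * register offsets at all.
 */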
/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}

/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			     lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			     lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case 119:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case 120:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

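/*
 * The dec ring can only be padded in (PACKET0(mmUVD_NO_OP), 0) pairs, so
 * the helper below warns if either the current write pointer or the
 * requested count is odd; a request to pad 6 dwords, for example, is
 * emitted as three NO_OP/0 pairs.
 */
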
static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v1_0_stop(adev);
	else
		ret = vcn_v1_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;
	return ret;
}

static void vcn_v1_0_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
	}

	fences += amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec);
	fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);

	if (amdgpu_fence_wait_empty(ring->adev->jpeg.inst->ring_dec))
		DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");

	vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
}

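/*
 * Rough sketch of the DPG pause policy applied by the helper below
 * (only when AMD_PG_SUPPORT_VCN_DPG is set): the fw_based state is
 * paused whenever an enc ring has fences outstanding or the ring being
 * brought into use is an enc ring, the jpeg state is paused whenever
 * the JPEG dec ring has fences outstanding or the ring being brought
 * into use is the JPEG ring, and everything else is left unpaused.
 */
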
void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
{
	struct amdgpu_device *adev = ring->adev;

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0, i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
	}
}

void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
}

static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
	uint32_t inst_off, is_powered;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * reg_count;
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_1_0[j].reg_name,
					   adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}

static void vcn_v1_0_dump_ip_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, j;
	bool is_powered;
	uint32_t inst_off;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * reg_count;
		/* mmUVD_POWER_STATUS is always readable and is first element of the array */
		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered)
			for (j = 1; j < reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_1_0[j], i));
	}
}

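/*
 * Layout of the adev->vcn.ip_dump buffer used by the two helpers above:
 * one block of ARRAY_SIZE(vcn_reg_list_1_0) dwords per VCN instance, so
 * the value of register j for instance i lives at
 * ip_dump[i * reg_count + j]. Slot 0 of each block always holds
 * mmUVD_POWER_STATUS, which is how print_ip_state() tells powered and
 * power-gated instances apart.
 */
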
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
	.dump_ip_state = vcn_v1_0_dump_ip_state,
	.print_ip_state = vcn_v1_0_print_ip_state,
};

/*
 * It is a hardware issue that VCN can't handle a GTT TMZ buffer on
 * CHIP_RAVEN series ASIC. Move such a GTT TMZ buffer to VRAM domain
 * before command submission as a workaround.
 */
static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
				struct amdgpu_job *job,
				uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr/AMDGPU_GPU_PAGE_SIZE);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	bo = mapping->bo_va->base.bo;
	if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
		return 0;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to validate the VCN message BO (%d)!\n", r);
		return r;
	}

	return r;
}

static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	uint32_t msg_lo = 0, msg_hi = 0;
	int i, r;

	if (!(ib->flags & AMDGPU_IB_FLAGS_SECURE))
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);
		uint32_t val = amdgpu_ib_get_value(ib, i + 1);

		if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
			msg_lo = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
			msg_hi = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
			r = vcn_v1_0_validate_bo(p, job,
						 ((u64)msg_hi) << 32 | msg_lo);
			if (r)
				return r;
		}
	}

	return 0;
}

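/*
 * A rough reading of the dec ring's emit_frame_size budget below:
 * 6 + 6 dwords for the HDP invalidate/flush writes, 6 dwords per
 * register write and 8 per register wait issued by the GMC TLB flush
 * (SOC15_FLUSH_GPU_TLB_NUM_WREG / _NUM_REG_WAIT, matching the dec-ring
 * wreg/reg_wait packet sizes noted earlier), 8 more dwords for
 * vcn_v1_0_dec_ring_emit_vm_flush itself, 2 x 14 for the two possible
 * VM fences, plus a trailing 6-dword reserve. The enc ring further down
 * budgets only 3/4 dwords per write/wait because its commands are raw
 * VCN_ENC_CMD packets.
 */
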
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.secure_submission_supported = true,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.patch_cs_in_place = vcn_v1_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v1_0_ring_begin_use,
	.end_use = vcn_v1_0_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v1_0_ring_begin_use,
	.end_use = vcn_v1_0_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
	adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};
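
/*
 * vcn_v1_0_ip_block is typically registered with the device from the SoC
 * setup code outside this file via amdgpu_device_ip_block_add(), which is
 * how the amd_ip_funcs callbacks above end up being invoked during device
 * init and teardown.
 */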