/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "vcn_v5_0_0.h"

#include <drm/drm_drv.h>

static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
};

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
		enum amd_powergating_state state);
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v5_0_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* re-use enc ring as unified ring */
	adev->vcn.num_enc_rings = 1;

	vcn_v5_0_0_set_unified_ring_funcs(adev);
	vcn_v5_0_0_set_irq_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}

void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
{
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
	uint32_t *ptr;

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}
}

/**
 * vcn_v5_0_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn5_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		/* VCN UNIFIED TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;

		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_unified_%d", i);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = 1;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	/* TODO: Add queue reset mask when FW fully supports it */
	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;

	vcn_v5_0_0_alloc_ip_dump(adev);

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v5_0_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn5_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	amdgpu_vcn_sysfs_reset_mask_fini(adev);
	r = amdgpu_vcn_sw_fini(adev);

	kfree(adev->vcn.ip_dump);

	return r;
}

/**
 * vcn_v5_0_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_enc[0];

		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
			((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v5_0_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, regUVD_STATUS))) {
				vcn_v5_0_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v5_0_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = vcn_v5_0_0_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(ip_block->adev);

	return r;
}

/**
 * vcn_v5_0_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_vcn_resume(ip_block->adev);
	if (r)
		return r;

	r = vcn_v5_0_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v5_0_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}

/**
 * vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);

	return;
}

/**
 * vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable static power gating for VCN block
 */
static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	} else {
		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	}

	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
	return;
}

/**
 * vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable static power gating for VCN block
 */
static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
	}
	return;
}

/**
 * vcn_v5_0_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	return;
}

#if 0
/**
 * vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
		int inst_idx, uint8_t indirect)
{
	return;
}
#endif

/**
 * vcn_v5_0_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	return;
}

/**
 * vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);

	vcn_v5_0_0_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);

	return 0;
}

/**
 * vcn_v5_0_0_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v5_0_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, k, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_vcn(adev, true, i);
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v5_0_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable VCN power gating */
		vcn_v5_0_0_disable_static_power_gating(adev, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		vcn_v5_0_0_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, regUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
				if (amdgpu_emu_mode == 1)
					msleep(1);
			}

			if (amdgpu_emu_mode == 1) {
				r = -1;
				if (status & 2) {
					r = 0;
					break;
				}
			} else {
				r = 0;
				if (status & 2)
					break;

				dev_err(adev->dev,
					"VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
					UVD_VCPU_CNTL__BLK_RST_MASK,
					~UVD_VCPU_CNTL__BLK_RST_MASK);
				mdelay(10);
				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
					~UVD_VCPU_CNTL__BLK_RST_MASK);

				mdelay(10);
				r = -1;
			}
		}

		if (r) {
			dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
			VCN_RB1_DB_CTRL__EN_MASK);

		WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);

		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
		fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
		WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

		tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
		ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);

		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
		fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
	}

	return 0;
}

/**
 * vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

	vcn_v5_0_0_pause_dpg_mode(adev, inst_idx, &state);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return;
}

/**
 * vcn_v5_0_0_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			vcn_v5_0_0_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

		/* enable VCN power gating */
		vcn_v5_0_0_enable_static_power_gating(adev, i);
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_vcn(adev, false, i);
	}

	return 0;
}

/**
 * vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
	struct dpg_pause_state *new_state)
{
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v5_0_0_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v5_0_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
}

/**
 * vcn_v5_0_0_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v5_0_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
}

/**
 * vcn_v5_0_0_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v5_0_0_unified_ring_get_rptr,
	.get_wptr = vcn_v5_0_0_unified_ring_get_wptr,
	.set_wptr = vcn_v5_0_0_unified_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v5_0_0_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_0_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
	}
}

/**
 * vcn_v5_0_0_is_idle - check VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether VCN block is idle
 */
static bool vcn_v5_0_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

/**
 * vcn_v5_0_0_wait_for_idle - wait for VCN block idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Wait for VCN block idle
 */
static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
	enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v5_0_0_enable_clock_gating(adev, i);
		} else {
			vcn_v5_0_0_disable_clock_gating(adev, i);
		}
	}

	return 0;
}

/**
 * vcn_v5_0_0_set_powergating_state - set VCN block powergating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
	enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v5_0_0_stop(adev);
	else
		ret = vcn_v5_0_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

/**
 * vcn_v5_0_0_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
	struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_5_0__SRCID_UVD_POISON:
		amdgpu_vcn_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v5_0_0_irq_funcs = {
	.process = vcn_v5_0_0_process_interrupt,
};

/**
 * vcn_v5_0_0_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs;
	}
}

void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
		struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
	uint32_t inst_off, is_powered;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * reg_count;
		is_powered = (adev->vcn.ip_dump[inst_off] &
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
					adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}

void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	uint32_t inst_off;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * reg_count;
		/* mmUVD_POWER_STATUS is always readable and is first element of the array */
		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
		is_powered = (adev->vcn.ip_dump[inst_off] &
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered)
			for (j = 1; j < reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
	}
}

static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
	.name = "vcn_v5_0_0",
	.early_init = vcn_v5_0_0_early_init,
	.sw_init = vcn_v5_0_0_sw_init,
	.sw_fini = vcn_v5_0_0_sw_fini,
	.hw_init = vcn_v5_0_0_hw_init,
	.hw_fini = vcn_v5_0_0_hw_fini,
	.suspend = vcn_v5_0_0_suspend,
	.resume = vcn_v5_0_0_resume,
	.is_idle = vcn_v5_0_0_is_idle,
	.wait_for_idle = vcn_v5_0_0_wait_for_idle,
	.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
	.set_powergating_state = vcn_v5_0_0_set_powergating_state,
	.dump_ip_state = vcn_v5_0_0_dump_ip_state,
	.print_ip_state = vcn_v5_0_0_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v5_0_0_ip_funcs,
};