/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "mmsch_v4_0.h"
#include "vcn_v4_0.h"

#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#include <drm/drm_drv.h>

#define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX

#define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0	0x48300
#define VCN1_AON_SOC_ADDRESS_3_0	0x48000

#define VCN_HARVEST_MMSCH		0

#define RDECODE_MSG_CREATE		0x00000000
#define RDECODE_MESSAGE_CREATE		0x00000001

static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
};
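
/*
 * Note: this register list is the per-instance snapshot set for the VCN IP
 * dump buffer (adev->vcn.ip_dump, allocated in vcn_v4_0_sw_init below); the
 * dump/print callbacks that consume it sit outside this excerpt.
 */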

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state);
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state);
static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);

/**
 * vcn_v4_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
				adev->vcn.harvest_config |= 1 << i;
				dev_info(adev->dev, "VCN%d is disabled by hypervisor\n", i);
			}
		}
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		/* re-use enc ring as unified ring */
		adev->vcn.inst[i].num_enc_rings = 1;

	vcn_v4_0_set_unified_ring_funcs(adev);
	vcn_v4_0_set_irq_funcs(adev);
	vcn_v4_0_set_ras_funcs(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].set_pg_state = vcn_v4_0_set_pg_state;

		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;

	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
	fw_shared->sq.is_enabled = 1;

	fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
	fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
		AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;

	if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
	    IP_VERSION(4, 0, 2)) {
		fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
		fw_shared->drm_key_wa.method =
			AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
	}

	if (amdgpu_vcnfw_log)
		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);

	return 0;
}

/**
 * vcn_v4_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
	uint32_t *ptr;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_vcn_sw_init(adev, i);
		if (r)
			return r;

		amdgpu_vcn_setup_ucode(adev, i);

		r = amdgpu_vcn_resume(adev, i);
		if (r)
			return r;

		/* Init instance 0 sched_score to 1, so it's scheduled after other instances */
		if (i == 0)
			atomic_set(&adev->vcn.inst[i].sched_score, 1);
		else
			atomic_set(&adev->vcn.inst[i].sched_score, 0);

		/* VCN UNIFIED TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
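		/*
		 * Doorbell layout, as implied by the math below: both paths
		 * start from the first VCN doorbell page (vcn_ring0_1 << 1).
		 * Under SR-IOV the MMSCH expects the per-ring doorbells of
		 * each instance packed consecutively; on bare metal each
		 * instance gets a fixed stride of 8 doorbells starting at
		 * offset 2.
		 */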
		if (amdgpu_sriov_vf(adev))
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i *
				(adev->vcn.inst[i].num_enc_rings + 1) + 1;
		else
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_unified_%d", i);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				     AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		vcn_v4_0_fw_shared_init(adev, i);

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
	}

	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
	adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	r = amdgpu_vcn_ras_sw_init(adev);
	if (r)
		return r;

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v4_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;
	}

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_sw_fini(adev, i);
		if (r)
			return r;
	}

	kfree(adev->vcn.ip_dump);

	return 0;
}

/**
 * vcn_v4_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v4_0_start_sriov(adev);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_enc[0];
			ring->wptr = 0;
			ring->wptr_old = 0;
			vcn_v4_0_unified_ring_set_wptr(ring);
			ring->sched.ready = true;
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_enc[0];

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}
	}

	return 0;
}
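
/*
 * Power-gating condition in hw_fini below: on bare metal the instance is
 * force-gated if DPG is supported, or if it was left ungated and the VCPU
 * still reports activity in UVD_STATUS; under SR-IOV power gating is left
 * to the host.
 */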

/**
 * vcn_v4_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		cancel_delayed_work_sync(&vinst->idle_work);

		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (vinst->cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, regUVD_STATUS))) {
				vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
			}
		}
		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
			amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0);
	}

	return 0;
}

/**
 * vcn_v4_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v4_0_hw_fini(ip_block);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(ip_block->adev, i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v4_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
	}

	r = vcn_v4_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v4_0_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
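
	/*
	 * The VCPU sees three cache windows carved out of the instance's VCN
	 * BO: firmware image, stack, then context, laid out back to back.
	 * When the PSP loads the firmware it lives in the TMR instead, so
	 * window 0 points at the TMR and @offset stays 0; otherwise window 0
	 * covers the start of the BO and the stack/context windows begin
	 * @size bytes in.
	 */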
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}

/**
 * vcn_v4_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
		adev->gfx.config.gb_addr_config, 0, indirect);
}
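
/*
 * Static power gating is driven through the UVD power-gating state machine:
 * a per-tile request value is written to UVD_PGFSM_CONFIG and the matching
 * state is then polled in UVD_PGFSM_STATUS (the 0x3F3FFFFF masks below
 * appear to cover the per-tile status fields).
 */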

/**
 * vcn_v4_0_disable_static_power_gating - disable VCN static power gating
 *
 * @vinst: VCN instance
 *
 * Disable static power gating for VCN block
 */
static void vcn_v4_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		uint32_t value;

		value = (inst) ? 0x2200800 : 0;
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS, value, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

	return;
}

/**
 * vcn_v4_0_enable_static_power_gating - enable VCN static power gating
 *
 * @vinst: VCN instance
 *
 * Enable static power gating for VCN block
 */
static void vcn_v4_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDS_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}

	return;
}

/**
 * vcn_v4_0_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, regUVD_CGC_GATE, data);
	SOC15_WAIT_ON_RREG(VCN, inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @vinst: VCN instance
 * @sram_sel: sram select
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
						   uint8_t sram_sel,
						   uint8_t indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v4_0_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v4_0_enable_ras(struct amdgpu_vcn_inst *vinst,
				bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t tmp;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
		return;

	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
		VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
		VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
		VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regVCN_RAS_CNTL),
			      tmp, 0, indirect);

	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_SYS_INT_EN),
			      tmp, 0, indirect);
}

/**
 * vcn_v4_0_start_dpg_mode - VCN start with dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);
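
	/*
	 * With @indirect set, the WREG32_SOC15_DPG_MODE() calls below do not
	 * touch the registers directly: they are staged as offset/value
	 * pairs in the DPG scratch buffer starting at dpg_sram_curr_addr and
	 * only take effect once amdgpu_vcn_psp_update_sram() hands the
	 * buffer to the PSP near the end of this function.
	 */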
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* disable clock gating */
	vcn_v4_0_disable_clock_gating_dpg_mode(vinst, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v4_0_mc_resume_dpg_mode(vinst, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);

	vcn_v4_0_enable_ras(vinst, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);
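
	/*
	 * Bring the ring up with RB1 disabled and FW_QUEUE_RING_RESET
	 * signalled through fw_shared, so the firmware picks up the zeroed
	 * rptr/wptr before RB1 is enabled again and the doorbell is armed.
	 */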
	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);

	return 0;
}

/**
 * vcn_v4_0_start - VCN start
 *
 * @vinst: VCN instance
 *
 * Start VCN block
 */
static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int j, k, r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, true, i);

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v4_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);

	/* disable VCN power gating */
	vcn_v4_0_disable_static_power_gating(vinst);

	/* set VCN status busy */
	tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v4_0_disable_clock_gating(vinst);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
		UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

	/* setup regUVD_LMI_CTRL */
	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup regUVD_MPC_CNTL */
	tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup UVD_MPC_SET_MUX */
	WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v4_0_mc_resume(vinst);

	/* VCN global tiling registers */
	WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);

	/* unblock VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
		~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
		~UVD_VCPU_CNTL__BLK_RST_MASK);
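
	/*
	 * Boot handshake: poll UVD_STATUS for the VCPU-report "boot done"
	 * bit (0x2) for up to ~1s, and retry the whole boot with a VCPU
	 * block reset up to 10 times before giving up; under emulation the
	 * loop only polls, without the reset retries.
	 */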
	for (j = 0; j < 10; ++j) {
		uint32_t status;

		for (k = 0; k < 100; ++k) {
			status = RREG32_SOC15(VCN, i, regUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
			if (amdgpu_emu_mode == 1)
				msleep(1);
		}

		if (amdgpu_emu_mode == 1) {
			r = -1;
			if (status & 2) {
				r = 0;
				break;
			}
		} else {
			r = 0;
			if (status & 2)
				break;

			dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}
	}

	if (r) {
		dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
		~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	ring = &adev->vcn.inst[i].ring_enc[0];
	WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);

	WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	return 0;
}

static int vcn_v4_0_init_ring_metadata(struct amdgpu_device *adev, uint32_t vcn_inst, struct amdgpu_ring *ring_enc)
{
	struct amdgpu_vcn_rb_metadata *rb_metadata = NULL;
	uint8_t *rb_ptr = (uint8_t *)ring_enc->ring;

	rb_ptr += ring_enc->ring_size;
	rb_metadata = (struct amdgpu_vcn_rb_metadata *)rb_ptr;

	memset(rb_metadata, 0, sizeof(struct amdgpu_vcn_rb_metadata));
	rb_metadata->size = sizeof(struct amdgpu_vcn_rb_metadata);
	rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
	rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG);
	rb_metadata->version = 1;
	rb_metadata->ring_id = vcn_inst & 0xFF;

	return 0;
}

static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring_enc;
	uint64_t cache_addr;
	uint64_t rb_enc_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;
	uint32_t init_status;
	uint32_t enabled_vcn;

	struct mmsch_v4_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v4_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v4_0_cmd_end end = { {0} };
	struct mmsch_v4_0_init_header header;

	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	volatile struct amdgpu_fw_shared_rb_setup *rb_setup;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
	header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2;
	for (i = 0; i < MMSCH_V4_0_VCN_INSTANCES; i++) {
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = 0;
		header.inst[i].table_size = 0;
	}
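
	/*
	 * Build one MMSCH init table per instance right after the header:
	 * each table is a list of direct register write/read-modify-write
	 * commands that the MMSCH replays on the VF's behalf, since the VF
	 * cannot program these registers itself.
	 */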
	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		// Must re/init fw_shared at beginning
		vcn_v4_0_fw_shared_init(adev, i);

		table_size = 0;

		MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_VCPU_CACHE_OFFSET0),
				0);
		} else {
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_SIZE0),
			cache_size);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		rb_setup = &fw_shared->rb_setup;

		ring_enc = &adev->vcn.inst[i].ring_enc[0];
		ring_enc->wptr = 0;
		rb_enc_addr = ring_enc->gpu_addr;

		rb_setup->is_rb_enabled_flags |= RB_ENABLED;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);

		if (amdgpu_sriov_is_vcn_rb_decouple(adev)) {
			vcn_v4_0_init_ring_metadata(adev, i, ring_enc);

			memset((void *)&rb_setup->rb_info, 0, sizeof(struct amdgpu_vcn_rb_setup_info) * MAX_NUM_VCN_RB_SETUP);
			if (!(adev->vcn.harvest_config & (1 << 0))) {
				rb_setup->rb_info[0].rb_addr_lo = lower_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr);
				rb_setup->rb_info[0].rb_addr_hi = upper_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr);
				rb_setup->rb_info[0].rb_size = adev->vcn.inst[0].ring_enc[0].ring_size / 4;
			}
			if (!(adev->vcn.harvest_config & (1 << 1))) {
				rb_setup->rb_info[2].rb_addr_lo = lower_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr);
				rb_setup->rb_info[2].rb_addr_hi = upper_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr);
				rb_setup->rb_info[2].rb_size = adev->vcn.inst[1].ring_enc[0].ring_size / 4;
			}
			fw_shared->decouple.is_enabled = 1;
			fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG);
		} else {
			rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
			rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
			rb_setup->rb_size = ring_enc->ring_size / 4;
		}

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_NONCACHE_SIZE0),
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));

		/* add end packet */
		MMSCH_V4_0_INSERT_END();

		/* refine header */
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = header.total_size;
		header.inst[i].table_size = table_size;
		header.total_size += table_size;
	}

	/* Update init table header in memory */
	size = sizeof(struct mmsch_v4_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy((void *)table_loc, &header, size);

	/* message MMSCH (in VCN[0]) to initialize this client
	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	ctx_addr = table->gpu_addr;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

	/* 2, update vmid of descriptor */
	tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID);
	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp);

	/* 3, notify mmsch about the size of this descriptor */
	size = header.total_size;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0);

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	param = 0x00000001;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param);
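
	/* poll the response register in 10 usec steps, ~1000 usec budget */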
	tmp = 0;
	timeout = 1000;
	resp = 0;
	expected = MMSCH_VF_MAILBOX_RESP__OK;
	while (resp != expected) {
		resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);
		if (resp != 0)
			break;

		udelay(10);
		tmp = tmp + 10;
		if (tmp >= timeout) {
			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
				" waiting for regMMSCH_VF_MAILBOX_RESP "\
				"(expected=0x%08x, readback=0x%08x)\n",
				tmp, expected, resp);
			return -EBUSY;
		}
	}

	enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
	init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->inst[enabled_vcn].init_status;
	if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
	    && init_status != MMSCH_VF_ENGINE_STATUS__PASS)
		DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
			"status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);

	return 0;
}

/**
 * vcn_v4_0_stop_dpg_mode - VCN stop with dpg mode
 *
 * @vinst: VCN instance
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v4_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

	vcn_v4_0_pause_dpg_mode(vinst, &state);
	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
}

/**
 * vcn_v4_0_stop - VCN stop
 *
 * @vinst: VCN instance
 *
 * Stop VCN block
 */
static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	uint32_t tmp;
	int r = 0;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		vcn_v4_0_stop_dpg_mode(vinst);
		r = 0;
		goto done;
	}

	/* wait for vcn idle */
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		goto done;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* disable LMI UMC channel */
	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* block VCPU register access */
	/* block VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
		UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
		~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
		UVD_VCPU_CNTL__BLK_RST_MASK,
		~UVD_VCPU_CNTL__BLK_RST_MASK);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
		~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* apply soft reset */
	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

	/* clear status */
	WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

	/* apply HW clock gating */
	vcn_v4_0_enable_clock_gating(vinst);

	/* enable VCN power gating */
	vcn_v4_0_enable_static_power_gating(vinst);

done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, i);

	return 0;
}

/**
 * vcn_v4_0_pause_dpg_mode - VCN pause with dpg mode
 *
 * @vinst: VCN instance
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v4_0_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v4_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
}
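
/* With doorbells in use, the write pointer is mirrored in a CPU-visible
 * shadow (ring->wptr_cpu_addr); the helpers below only touch
 * regUVD_RB_WPTR on the register fallback path.
 */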

/**
 * vcn_v4_0_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v4_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
}

/**
 * vcn_v4_0_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
				struct amdgpu_job *job)
{
	struct drm_gpu_scheduler **scheds;

	/* The create msg must be in the first IB submitted */
	if (atomic_read(&job->base.entity->fence_seq))
		return -EINVAL;

	/* if VCN0 is harvested, we can't support AV1 */
	if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
		return -EINVAL;

	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
		[AMDGPU_RING_PRIO_0].sched;
	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
	return 0;
}

static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
			    uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *map;
	uint32_t *msg, num_buffers;
	struct amdgpu_bo *bo;
	uint64_t start, end;
	unsigned int i;
	void *ptr;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
		return r;
	}

	start = map->start * AMDGPU_GPU_PAGE_SIZE;
	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
		return r;
	}

	msg = ptr + addr - start;

	/* Check length */
	if (msg[1] > end - addr) {
		r = -EINVAL;
		goto out;
	}

	if (msg[3] != RDECODE_MSG_CREATE)
		goto out;

	num_buffers = msg[2];
	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
		uint32_t offset, size, *create;

		if (msg[0] != RDECODE_MESSAGE_CREATE)
			continue;

		offset = msg[1];
		size = msg[2];

		if (offset + size > end) {
			r = -EINVAL;
			goto out;
		}

		create = ptr + addr + offset - start;
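
		/* create[0] carries the stream type of the create message;
		 * codecs not whitelisted below (notably AV1) are funneled to
		 * the first VCN instance through vcn_v4_0_limit_sched().
		 */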

		/* H264, HEVC and VP9 can run on any instance */
		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
			continue;

		r = vcn_v4_0_limit_sched(p, job);
		if (r)
			goto out;
	}

out:
	amdgpu_bo_kunmap(bo);
	return r;
}

#define RADEON_VCN_ENGINE_TYPE_ENCODE			(0x00000002)
#define RADEON_VCN_ENGINE_TYPE_DECODE			(0x00000003)

#define RADEON_VCN_ENGINE_INFO				(0x30000001)
#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET		16

#define RENCODE_ENCODE_STANDARD_AV1			2
#define RENCODE_IB_PARAM_SESSION_INIT			0x00000003
#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET	64

/* return the offset in ib if id is found, -1 otherwise
 * to speed up the searching we only search up to max_offset
 */
static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
{
	int i;

	/* each package starts with its size in bytes, followed by its id */
	for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
		if (ib->ptr[i + 1] == id)
			return i;
	}
	return -1;
}

static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	struct amdgpu_ring *ring = amdgpu_job_ring(job);
	struct amdgpu_vcn_decode_buffer *decode_buffer;
	uint64_t addr;
	uint32_t val;
	int idx;

	/* The first instance can decode anything */
	if (!ring->me)
		return 0;

	/* RADEON_VCN_ENGINE_INFO is at the top of ib block */
	idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
					 RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
	if (idx < 0) /* engine info is missing */
		return 0;

	val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
	if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
		decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];

		if (!(decode_buffer->valid_buf_flag & 0x1))
			return 0;

		addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
			decode_buffer->msg_buffer_address_lo;
		return vcn_v4_0_dec_msg(p, job, addr);
	} else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
		idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
						 RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
		if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
			return vcn_v4_0_limit_sched(p, job);
	}
	return 0;
}

static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];

	if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
		return -EOPNOTSUPP;

	vcn_v4_0_stop(vinst);
	vcn_v4_0_start(vinst);

	return amdgpu_ring_test_helper(ring);
}
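
/* Note: this table is shared by all unified queues and is not const,
 * since vcn_v4_0_set_unified_ring_funcs() may set
 * secure_submission_supported at init time (VCN 4.0.2) before the
 * table is installed on each instance.
 */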
static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
	.get_rptr = vcn_v4_0_unified_ring_get_rptr,
	.get_wptr = vcn_v4_0_unified_ring_get_wptr,
	.set_wptr = vcn_v4_0_unified_ring_set_wptr,
	.patch_cs_in_place = vcn_v4_0_ring_patch_cs_in_place,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = vcn_v4_0_ring_reset,
};

/**
 * vcn_v4_0_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 2))
			vcn_v4_0_unified_ring_vm_funcs.secure_submission_supported = true;

		adev->vcn.inst[i].ring_enc[0].funcs =
			(const struct amdgpu_ring_funcs *)&vcn_v4_0_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
	}
}

/**
 * vcn_v4_0_is_idle - check VCN block is idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block structure
 *
 * Check whether VCN block is idle
 */
static bool vcn_v4_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

/**
 * vcn_v4_0_wait_for_idle - wait for VCN block idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Wait for VCN block idle
 */
static int vcn_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
					 UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v4_0_set_clockgating_state - set VCN block clockgating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v4_0_enable_clock_gating(vinst);
		} else {
			vcn_v4_0_disable_clock_gating(vinst);
		}
	}

	return 0;
}

static int vcn_v4_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state)
{
	struct amdgpu_device *adev = vinst->adev;
	int ret = 0;

	/* for SRIOV, guest should not control VCN Power-gating
	 * MMSCH FW should control Power-gating and clock-gating
	 * guest should avoid touching CGC and PG
	 */
	if (amdgpu_sriov_vf(adev)) {
		vinst->cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == vinst->cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v4_0_stop(vinst);
	else
		ret = vcn_v4_0_start(vinst);

	if (!ret)
		vinst->cur_state = state;

	return ret;
}

/**
 * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block RAS interrupt state
 */
static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *source,
					    unsigned int type,
					    enum amdgpu_interrupt_state state)
{
	return 0;
}

/**
 * vcn_v4_0_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	if (amdgpu_sriov_is_vcn_rb_decouple(adev)) {
		ip_instance = entry->ring_id;
	} else {
		switch (entry->client_id) {
		case SOC15_IH_CLIENTID_VCN:
			ip_instance = 0;
			break;
		case SOC15_IH_CLIENTID_VCN1:
			ip_instance = 1;
			break;
		default:
			DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
			return 0;
		}
	}

	DRM_DEBUG("IH: VCN TRAP\n");
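
	/* Only the unified queue's general-purpose source is expected here;
	 * it completes fences on ring_enc[0] of the instance resolved above,
	 * and anything else falls through to the error path.
	 */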

	switch (entry->src_id) {
	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
	.process = vcn_v4_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = {
	.set = vcn_v4_0_set_ras_interrupt_state,
	.process = amdgpu_vcn_process_poison_irq,
};

/**
 * vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;

		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
		adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
	}
}

static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
	uint32_t inst_off, is_powered;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * reg_count;
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
					   adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}

static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	uint32_t inst_off;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * reg_count;
		/* mmUVD_POWER_STATUS is always readable and is first element of the array */
		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered)
			for (j = 1; j < reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j], i));
	}
}
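
/* Note on the dump layout: ip_dump is a flat array of reg_count registers
 * per instance, indexed by inst_off. For a gated instance only
 * UVD_POWER_STATUS is re-read, so the remaining slots keep whatever was
 * captured while the instance was last powered.
 */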
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
	.name = "vcn_v4_0",
	.early_init = vcn_v4_0_early_init,
	.sw_init = vcn_v4_0_sw_init,
	.sw_fini = vcn_v4_0_sw_fini,
	.hw_init = vcn_v4_0_hw_init,
	.hw_fini = vcn_v4_0_hw_fini,
	.suspend = vcn_v4_0_suspend,
	.resume = vcn_v4_0_resume,
	.is_idle = vcn_v4_0_is_idle,
	.wait_for_idle = vcn_v4_0_wait_for_idle,
	.set_clockgating_state = vcn_v4_0_set_clockgating_state,
	.set_powergating_state = vcn_set_powergating_state,
	.dump_ip_state = vcn_v4_0_dump_ip_state,
	.print_ip_state = vcn_v4_0_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v4_0_ip_funcs,
};

static uint32_t vcn_v4_0_query_poison_by_instance(struct amdgpu_device *adev,
						  uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_VCN_V4_0_VCPU_VCODEC:
		reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
			 instance, sub_block);

	return poison_stat;
}

static bool vcn_v4_0_query_ras_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst, sub;
	uint32_t poison_stat = 0;

	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
		for (sub = 0; sub < AMDGPU_VCN_V4_0_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				vcn_v4_0_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
	.query_poison_status = vcn_v4_0_query_ras_poison_status,
};

static struct amdgpu_vcn_ras vcn_v4_0_ras = {
	.ras_block = {
		.hw_ops = &vcn_v4_0_ras_hw_ops,
		.ras_late_init = amdgpu_vcn_ras_late_init,
	},
};

static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
		adev->vcn.ras = &vcn_v4_0_ras;
		break;
	default:
		break;
	}
}