/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "mp/mp_11_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect register accessors
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
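
/*
 * DIDT registers use the same index/data pattern as the PCIe accessors
 * above, but go through GC's mmDIDT_IND_INDEX/mmDIDT_IND_DATA pair and
 * are serialized by didt_idx_lock instead of pcie_idx_lock.
 */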
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, reg);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, reg);
	WREG32(data, v);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

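	/* restore broadcast mode: 0xffffffff selects all SEs/SHs */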
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	return smu_baco_is_support(smu);
}

static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	if (smu_baco_is_support(smu))
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		dev_info(adev->dev, "GPU BACO reset\n");

		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
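
/*
 * PCIe gen speed and ASPM programming are still stubs on Navi: both
 * honor their module parameters (amdgpu_pcie_gen2, amdgpu_aspm) and
 * the reported link capabilities, but the actual register programming
 * remains TODO.
 */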
static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}

int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
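		/* unlike Navi10/14 above, the SMU block is added here even
		 * for SR-IOV VFs when the PSP loads the firmware
		 */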
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_SIENNA_CICHLID:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);

		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVY_FLOUNDER:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
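		/* no ring context: invalidate the HDP read cache with a
		 * direct MMIO write
		 */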
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO
	 * dummy implementation for the pcie_replay_count sysfs interface
	 */
	return 0;
}

static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs = {
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
};

static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* A guest VM reads 0xffffffff from RCC_DEV0_EPF0_STRAP0;
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * Work around this by hardcoding the rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* the hypervisor controls CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in the MMIO space in order to
	 * expose those registers to process address space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}
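
/*
 * HDP 5.0 SRAM power modes: LS = light sleep, DS = deep sleep,
 * SD = shut down. Only one of the three may be enabled at a time,
 * and the IPH/RC memory clocks must be forced on while switching.
 */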
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before switching the clock/power mode, force the IPH & RC
	 * memory clocks on.
	 */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switching;
	 * disable clock and power gating before making any change.
	 */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut down mode; fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	/* IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to be set
	 * for SRAM LS/DS/SD to take effect.
	 */
	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
			      AMD_CG_SUPPORT_HDP_SD)) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_CTRL_EN, 1);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_CTRL_EN, 1);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore the IPH & RC clock override after the clock/power mode change */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

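	/* bail out early when HDP medium-grain clock gating isn't supported */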
	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};