/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "mp/mp_11_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect register accessors
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
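
/*
 * Both accessors implement the usual index/data pair protocol: write the
 * register offset to the NBIO index port, read the index port back to flush
 * the posted write, then touch the data port, all under pcie_idx_lock so the
 * sequence cannot be interleaved by another CPU. They are normally reached
 * through the RREG32_PCIE()/WREG32_PCIE() wrappers once
 * nv_common_early_init() has installed them, e.g. (illustrative only):
 *
 *	u32 val = RREG32_PCIE(reg);
 *	WREG32_PCIE(reg, val | mask);	// mask: placeholder bit(s)
 */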

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
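
/*
 * nv_read_register() below only services offsets found in the table above;
 * anything else fails with -EINVAL. A hypothetical caller (e.g. a
 * register-read query path) would look like:
 *
 *	u32 val;
 *
 *	if (!nv_read_register(adev, 0xffffffff, 0xffffffff,
 *			      SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS), &val))
 *		dev_dbg(adev->dev, "GRBM_STATUS=0x%08x\n", val);
 *
 * se_num/sh_num of 0xffffffff select broadcast rather than a specific shader
 * engine/array.
 */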

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (smu_baco_is_support(smu))
		return true;
	else
		return false;
}

static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	if (smu_baco_is_support(smu))
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		dev_info(adev->dev, "GPU BACO reset\n");

		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}
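
/*
 * Reset-method selection notes: nv_asic_reset_method() above honours the
 * amdgpu.reset_method module parameter when it names a method this ASIC
 * supports (mode1 or BACO) and otherwise auto-selects, preferring BACO
 * ("Bus Active, Chip Off", which keeps the PCIe link alive) whenever the
 * SMU reports support. Illustrative override at module load (value 4
 * selects BACO in the current parameter encoding):
 *
 *	# modprobe amdgpu reset_method=4
 */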

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}
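
/*
 * nv_set_ip_blocks() below registers the per-IP drivers in bring-up order:
 * the sequence of amdgpu_device_ip_block_add() calls is the order in which
 * the common amdgpu core will run each block's sw_init/hw_init hooks. Note
 * the SMU block is inserted either right after PSP (PSP-backed firmware
 * loading) or after SDMA (direct loading), depending on
 * adev->firmware.load_type.
 */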

int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	if (adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->gmc.xgmi.supported = true;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_SIENNA_CICHLID:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);

		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVY_FLOUNDER:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}
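
/*
 * HDP (Host Data Path) maintenance: a flush pushes CPU writes through the
 * HDP write cache so the GPU observes them, while an invalidate drops stale
 * HDP read-cache contents. When a ring with emit_wreg support is passed to
 * nv_invalidate_hdp(), the register write is emitted on the ring so it stays
 * ordered with the ring's other packets instead of taking the immediate
 * MMIO path.
 */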

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO
	 * dummy implementation for the pcie_replay_count sysfs interface
	 */

	return 0;
}

static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}
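
/*
 * Doorbell layout notes (informational): the AMDGPU_NAVI10_DOORBELL_*
 * assignments above are indices of 64-bit doorbell slots; max_assignment is
 * shifted left by one to convert it to the 32-bit dword indexing used
 * elsewhere in the driver, and sdma_doorbell_range reserves a 20-entry
 * window per SDMA engine for the NBIO doorbell range registers.
 */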

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
};

static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
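	/*
	 * Informational: cg_flags/pg_flags advertise the clock- and
	 * power-gating features the IP code may enable on this ASIC, while
	 * external_rev_id is the revision exposed to userspace; each family
	 * below derives it from the silicon rev_id plus a fixed per-family
	 * offset.
	 */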
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0;
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * Work around this by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* the hypervisor controls CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* set up nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}
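
/*
 * HDP SRAM power modes are mutually exclusive: light sleep (LS), deep sleep
 * (DS) and shut down (SD). Since HDP 5.0 cannot switch modes dynamically,
 * nv_update_hdp_mem_power_gating() first forces the IPH and RC memory
 * clocks on and clears every enable bit, then turns on exactly one mode
 * according to cg_flags before restoring the clock overrides.
 */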

static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before switching the clock/power mode, force the IPH & RC
	 * clocks on */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switching;
	 * disable clock and power gating before making any changes */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut-down mode, fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	/* it has been confirmed that IPH_MEM_POWER_CTRL_EN and
	 * RC_MEM_POWER_CTRL_EN have to be set for SRAM LS/DS/SD */
	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
			      AMD_CG_SUPPORT_HDP_SD)) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_CTRL_EN, 1);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_CTRL_EN, 1);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore the IPH & RC clock override after changing the
	 * clock/power mode */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}
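
/*
 * HDP medium-grain clock gating is driven by the *_CLK_SOFT_OVERRIDE bits:
 * clearing them (the enable path below) allows the corresponding HDP clocks
 * to be gated automatically when idle, while setting them forces the clocks
 * on, i.e. disables the gating.
 */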

static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}
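
/*
 * Hook table consumed by the generic amdgpu IP framework through
 * nv_common_ip_block (defined near the top of this file); the core calls
 * these handlers at the matching stages of device init, suspend/resume and
 * power-state changes.
 */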

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};