/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

#define mmMP0_MISC_CGTT_CTRL0			0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX		0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL		0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX	0

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL	0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK	0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX	0

/*
 * Indirect registers accessor
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
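/*
 * Each indirect accessor here and below follows the same pattern: take
 * the matching index/data spinlock, write the register offset into the
 * index register, then read or write the data register.  The
 * "(void)RREG32(address)" after writing the index is a posting read
 * that flushes the index write before the data register is touched.
 */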
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}
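/*
 * The accessors above are installed as the adev->pcie_*, uvd_ctx_*,
 * didt_*, gc_cac_* and se_cac_* callbacks in soc15_common_early_init()
 * below, so the rest of the driver reaches them through the generic
 * register wrappers (RREG32_PCIE(), RREG32_DIDT() and friends).
 */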
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

struct soc15_allowed_register_entry {
	uint32_t hwip;
	uint32_t inst;
	uint32_t seg;
	uint32_t reg_offset;
	bool grbm_indexed;
};

static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}
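/*
 * In soc15_read_indexed_register() above, a se_num/sh_num of 0xffffffff
 * means "no specific instance": the GRBM index is only narrowed to a
 * single SE/SH for the read when a concrete index is given, and
 * broadcast selection is restored before the grbm_idx_mutex is dropped.
 */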
static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
				   + en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */

void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= entry->or_mask;
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			WREG32(reg, tmp);
	}
}
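/*
 * Illustrative example (not taken from a real golden-settings table):
 * an entry built as
 *
 *	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400)
 *
 * makes the loop above read mmDB_DEBUG2, clear the bits set in the
 * 0xf00fffff and_mask, OR in 0x00000400, and write the result back.
 * An and_mask of 0xffffffff skips the read-modify-write entirely and
 * stores the or_mask value directly.
 */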
static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
		*cap = false;
		return -ENOENT;
	}

	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
}
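/*
 * BACO ("Bus Active, Chip Off") resets the chip by entering and then
 * leaving that power state while the PCIe link stays up.  It depends
 * on SMU/powerplay support, which is why the capability query above
 * can veto it; in that case soc15_asic_reset() falls back to a PSP
 * mode1 reset.
 */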
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* enter BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
		return -EIO;

	/* exit BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
		return -EIO;

	dev_info(adev->dev, "GPU BACO reset\n");

	adev->in_baco_reset = 1;

	return 0;
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
	int ret;
	bool baco_reset;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		soc15_asic_get_baco_capability(adev, &baco_reset);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			soc15_asic_get_baco_capability(adev, &baco_reset);
		else
			baco_reset = false;
		if (baco_reset) {
			struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
			struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

			if (hive || (ras && ras->supported))
				baco_reset = false;
		}
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		ret = soc15_asic_baco_reset(adev);
	else
		ret = soc15_asic_mode1_reset(adev);

	return ret;
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}
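/*
 * soc15_set_ip_blocks() builds the per-ASIC IP block list in init
 * order.  Two ordering constraints are worth noting: the IP register
 * bases must be set up before any HW register access, and under SR-IOV
 * the PSP block has to be added before IH.
 */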
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	if (adev->asic_type == CHIP_VEGA20)
		adev->gmc.xgmi.supported = true;

	if (adev->flags & AMD_IS_APU)
		adev->nbio_funcs = &nbio_v7_0_funcs;
	else if (adev->asic_type == CHIP_VEGA20)
		adev->nbio_funcs = &nbio_v7_4_funcs;
	else
		adev->nbio_funcs = &nbio_v6_1_funcs;

	if (adev->asic_type == CHIP_VEGA20)
		adev->df_funcs = &df_v3_6_funcs;
	else
		adev->df_funcs = &df_v1_7_funcs;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->nbio_funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			if (is_support_sw_smu(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}

static void soc15_invalidate_hdp(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}
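/*
 * soc15_get_pcie_usage() below samples two PCIE TXCLK perf events
 * (event 40: messages received, event 104: posted requests sent) over
 * a fixed one-second window.  The counters themselves are 32 bits
 * wide; the COUNTER*_UPPER shadow fields supply the overflow bits that
 * get concatenated into the 64-bit results.
 */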
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs. Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
};
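/*
 * vega20_asic_funcs is identical to soc15_asic_funcs except for
 * init_doorbell_index, since Vega20 uses a different doorbell layout.
 */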
static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->rev_id >= 0x8)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->pdev->device == 0x15d8)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->rev_id >= 0x8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->pdev->device == 0x15d8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN |
				AMD_PG_SUPPORT_VCN_DPG;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}

		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
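/*
 * Note: the rmmio_remap setup at the top of soc15_common_early_init()
 * reserves a PAGE_SIZE hole just below offset 0x80000 in the MMIO BAR.
 * soc15_common_hw_init() later asks the NBIO code to remap the HDP
 * registers into that hole so they can be exposed to process space,
 * which is presumably why kfd_ioctl.h is included at the top of this
 * file.
 */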
static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	return 0;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	return 0;
}

static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* Two reasons to skip:
	 * 1) the host driver has already programmed the ranges;
	 * 2) to avoid register programming violations in SR-IOV.
	 */
	if (!amdgpu_virt_support_skip_setting(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio_funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}
	}

	adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
					    adev->irq.ih.doorbell_index);
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers to process space
	 */
	if (adev->nbio_funcs->remap_hdp_registers)
		adev->nbio_funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writes that do not fall
	 * in the SDMA/IH/MM/ACV ranges are routed to CP, so the
	 * SDMA/IH/MM/ACV doorbell ranges must be initialized before
	 * CP IP block init and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}
static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
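/*
 * The clock-gating helpers below all follow the same read-modify-write
 * pattern: read the current value into def/data, set or clear the
 * relevant mask bits depending on the enable flag and cg_flags, and
 * write the register back only if something actually changed, which
 * avoids redundant MMIO writes.
 */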
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (adev->asic_type == CHIP_VEGA20) {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
		else
			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
	} else {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		else
			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
	}
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}
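/*
 * Under SR-IOV the clock-gating registers are owned by the host, so
 * soc15_common_set_clockgating_state() below is a no-op for a VF.
 */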
static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_RAVEN:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	adev->df_funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};