/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"

MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");

/*
 * Indirect register accessors
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
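
/*
 * Every accessor in this file follows the same index/data pattern; a
 * minimal sketch (INDEX_REG/DATA_REG stand in for the per-block pair):
 *
 *	spin_lock_irqsave(&lock, flags);   - the index register is shared
 *	WREG32(INDEX_REG, reg);            - select the indirect register
 *	val = RREG32(DATA_REG);            - then access its contents
 *	spin_unlock_irqrestore(&lock, flags);
 *
 * The (void)RREG32(mmPCIE_INDEX) reads above are posting reads that make
 * sure the index write has landed before the data register is touched.
 */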

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	r = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	WREG32(mmSMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}


static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
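
/*
 * The golden-register tables above are flat {offset, and_mask, or_mask}
 * triplets; amdgpu_program_register_sequence() applies each one as a
 * read-modify-write, roughly:
 *
 *	if (and_mask == 0xffffffff)
 *		tmp = or_mask;
 *	else
 *		tmp = (RREG32(reg) & ~and_mask) | or_mask;
 *	WREG32(reg, tmp);
 */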

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
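
/*
 * Callers serialize this with adev->srbm_mutex around a select/program/
 * deselect sequence; a typical use (sketch) looks like:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	... program the per-instance registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */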

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_0, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
	/* bit0: 0 means pf and 1 means vf */
	/* bit31: 0 means disable IOV and 1 means enable */
	if (reg & 1)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};
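
/*
 * An allowed-register entry is { reg_offset, untouched[, grbm_indexed] }:
 * "untouched" registers are reported back as 0 without touching the
 * hardware, and "grbm_indexed" ones are read through
 * vi_read_indexed_register() below so the requested SE/SH instance is
 * selected first.
 */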

static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = asic_register_entry->grbm_indexed ?
					vi_read_indexed_register(adev, se_num,
								 sh_num, reg_offset) :
					RREG32(reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_allowed_read_registers[i].grbm_indexed ?
				vi_read_indexed_register(adev, se_num,
							 sh_num, reg_offset) :
				RREG32(reg_offset);
		return 0;
	}
	return -EINVAL;
}
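
/*
 * vi_read_register() backs the .read_register asic hook; userspace register
 * reads (e.g. the AMDGPU_INFO_READ_MMR_REG query) are expected to land here
 * via amdgpu_asic_read_register(), which is why access is restricted to the
 * allow-lists above.
 */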

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
	u32 tmp = RREG32(mmBIOS_SCRATCH_3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(mmBIOS_SCRATCH_3, tmp);
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	vi_set_bios_scratch_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	vi_set_bios_scratch_engine_hung(adev, false);

	return r;
}
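
/*
 * UVD clock programming: atombios computes the dividers for the requested
 * clock, the post divider is merged into the CNTL register with a
 * read-modify-write, and the STATUS register is then polled (100 tries,
 * 10 ms apart, i.e. up to ~1 s) until the new divider has taken effect.
 */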

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

	return r;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

/* topaz has no DCE, UVD, VCE */
static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 4,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &iceland_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &sdma_v2_4_ip_funcs,
	},
};
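
/*
 * Each *_vd table below mirrors its plain counterpart but is used when
 * virtual display is enabled (adev->enable_virtual_display): the hardware
 * DCE entry is replaced by dce_virtual_ip_funcs (and DCE-less Topaz gains
 * one), so a display block is always present.
 */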

static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 4,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &iceland_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &sdma_v2_4_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 1,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 2,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 4,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 1,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 2,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 4,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &cz_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
#if defined(CONFIG_DRM_AMD_ACP)
	{
		.type = AMD_IP_BLOCK_TYPE_ACP,
		.major = 2,
		.minor = 2,
		.rev = 0,
		.funcs = &acp_ip_funcs,
	},
#endif
};

static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &cz_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
#if defined(CONFIG_DRM_AMD_ACP)
	{
		.type = AMD_IP_BLOCK_TYPE_ACP,
		.major = 2,
		.minor = 2,
		.rev = 0,
		.funcs = &acp_ip_funcs,
	},
#endif
};
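
/*
 * "ORDER MATTERS!" in these tables because the amdgpu core walks
 * adev->ip_blocks front to back for each init phase; roughly (sketch of
 * the consumer in amdgpu_device.c):
 *
 *	for (i = 0; i < adev->num_ip_blocks; i++)
 *		adev->ip_blocks[i].funcs->hw_init((void *)adev);
 *
 * so GMC has to come up before the blocks that need GPU memory, IH before
 * anything that raises interrupts, and so on.
 */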

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		switch (adev->asic_type) {
		case CHIP_TOPAZ:
			adev->ip_blocks = topaz_ip_blocks_vd;
			adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
			break;
		case CHIP_FIJI:
			adev->ip_blocks = fiji_ip_blocks_vd;
			adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
			break;
		case CHIP_TONGA:
			adev->ip_blocks = tonga_ip_blocks_vd;
			adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
			break;
		case CHIP_POLARIS11:
		case CHIP_POLARIS10:
			adev->ip_blocks = polaris11_ip_blocks_vd;
			adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
			break;
		case CHIP_CARRIZO:
		case CHIP_STONEY:
			adev->ip_blocks = cz_ip_blocks_vd;
			adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
			break;
		default:
			/* FIXME: not supported yet */
			return -EINVAL;
		}
	} else {
		switch (adev->asic_type) {
		case CHIP_TOPAZ:
			adev->ip_blocks = topaz_ip_blocks;
			adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
			break;
		case CHIP_FIJI:
			adev->ip_blocks = fiji_ip_blocks;
			adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
			break;
		case CHIP_TONGA:
			adev->ip_blocks = tonga_ip_blocks;
			adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
			break;
		case CHIP_POLARIS11:
		case CHIP_POLARIS10:
			adev->ip_blocks = polaris11_ip_blocks;
			adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
			break;
		case CHIP_CARRIZO:
		case CHIP_STONEY:
			adev->ip_blocks = cz_ip_blocks;
			adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
			break;
		default:
			/* FIXME: not supported yet */
			return -EINVAL;
		}
	}

	return 0;
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) &
			ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}
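
/*
 * The APU branch above extracts a 4-bit field: with the 0x00001E00 mask and
 * a shift of 9, a (hypothetical) fuse value of 0x00000600 yields
 * (0x600 & 0x1e00) >> 9 = 3.
 */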

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.detect_hw_virtualization = vi_detect_hw_virtualization,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
};

static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	/* in early init stage, vbios code won't work */
	if (adev->asic_funcs->detect_hw_virtualization)
		amdgpu_asic_detect_hw_virtualization(adev);

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}
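
/*
 * This and the clock gating helpers below share one pattern: snapshot the
 * register, set or clear the enable bits depending on whether the feature
 * is both requested and supported in adev->cg_flags, and only write back
 * when something actually changed (reg/FEATURE/ENABLE_MASK here are
 * placeholders):
 *
 *	temp = data = RREG32(reg);
 *	if (enable && (adev->cg_flags & FEATURE))
 *		data |= ENABLE_MASK;
 *	else
 *		data &= ~ENABLE_MASK;
 *	if (temp != data)
 *		WREG32(reg, data);
 */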

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *pp_handle = adev->powerplay.pp_handle;

	if (state == AMD_CG_STATE_UNGATE)
		pp_state = 0;
	else
		pp_state = PP_STATE_CG | PP_STATE_LS;

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_MC,
			      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_SDMA,
			      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_HDP,
			      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_BIF,
			      PP_STATE_SUPPORT_LS,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_BIF,
			      PP_STATE_SUPPORT_CG,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_DRM,
			      PP_STATE_SUPPORT_LS,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_ROM,
			      PP_STATE_SUPPORT_CG,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	return 0;
}
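
/*
 * ASIC dispatch for the above: Fiji and the APUs (Carrizo/Stoney) gate
 * BIF/HDP/ROM with the direct register helpers, while Tonga/Polaris route
 * the request through vi_common_set_clockgating_state_by_smu() as one
 * PP_CG_MSG_ID() message per block.
 */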

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};