/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif

/*
 * Indirect register accessors
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	r = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	WREG32(mmSMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
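
/*
 * Usage sketch (illustrative): vi_common_early_init() installs these
 * helpers as the generic indirect-register callbacks, so callers normally
 * go through the wrappers in amdgpu.h rather than calling them directly:
 *
 *	u32 v = RREG32_PCIE(ixPCIE_CNTL2);
 *	WREG32_PCIE(ixPCIE_CNTL2, v | PCIE_CNTL2__SLV_MEM_LS_EN_MASK);
 *
 * Each accessor holds the matching index/data spinlock so the two-step
 * index write plus data access cannot be interleaved with another user
 * of the same aperture.
 */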

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
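
/*
 * Note: amdgpu_program_register_sequence() consumes the tables above as
 * { register, and_mask, or_value } triplets, roughly:
 *
 *	tmp = RREG32(reg);
 *	tmp &= ~and_mask;
 *	tmp |= or_value;
 *	WREG32(reg, tmp);
 *
 * with an and_mask of 0xffffffff treated as a straight write of or_value.
 * The mmPCIE_INDEX/mmPCIE_DATA and mmSMC_IND_INDEX_4/mmSMC_IND_DATA_4
 * pairs therefore program indirect registers through their index/data
 * apertures.
 */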

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
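
/*
 * Usage sketch (illustrative): callers that reprogram instanced registers
 * typically bracket the access with a select/restore pair while holding
 * adev->srbm_mutex, as the gfx/compute setup paths do:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	... program per-instance registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */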

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_0, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};
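
/*
 * Note: each entry in the allowed-register tables is an
 * amdgpu_allowed_register_entry of the form
 * { reg_offset, untouched, grbm_indexed }. vi_read_register() below only
 * serves registers listed in these tables; "untouched" entries report 0
 * without touching the hardware, and "grbm_indexed" entries are read via
 * vi_read_indexed_register() so a specific SE/SH instance can be selected.
 */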

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v8_0_select_se_sh(adev, se_num, sh_num);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = asic_register_entry->grbm_indexed ?
					vi_read_indexed_register(adev, se_num,
								 sh_num, reg_offset) :
					RREG32(reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_allowed_read_registers[i].grbm_indexed ?
				vi_read_indexed_register(adev, se_num,
							 sh_num, reg_offset) :
				RREG32(reg_offset);
		return 0;
	}
	return -EINVAL;
}

static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}

static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
	u32 tmp = RREG32(mmBIOS_SCRATCH_3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(mmBIOS_SCRATCH_3, tmp);
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	vi_set_bios_scratch_engine_hung(adev, true);

	vi_gpu_pci_config_reset(adev);

	vi_set_bios_scratch_engine_hung(adev, false);

	return 0;
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

	return r;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

/* topaz has no DCE, UVD, VCE */
static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 4,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &iceland_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &sdma_v2_4_ip_funcs,
	},
};
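
/*
 * Note: the "ORDER MATTERS!" warning applies to every per-ASIC array in
 * this file. The amdgpu core walks ip_blocks front to back for
 * early_init/sw_init/hw_init and back to front on teardown, so a block
 * must appear after everything it depends on (e.g. GMC before the
 * engines that rely on VM-backed memory).
 */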

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &cz_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
#if defined(CONFIG_DRM_AMD_ACP)
	{
		.type = AMD_IP_BLOCK_TYPE_ACP,
		.major = 2,
		.minor = 2,
		.rev = 0,
		.funcs = &acp_ip_funcs,
	},
#endif
};

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->ip_blocks = topaz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
		break;
	case CHIP_FIJI:
		adev->ip_blocks = fiji_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
		break;
	case CHIP_TONGA:
		adev->ip_blocks = tonga_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->ip_blocks = cz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_cu_info = &gfx_v8_0_get_cu_info,
	/* these should be moved to their own ip modules */
	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
	.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};
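
/*
 * vi_common_early_init() below wires up the indirect register accessors
 * defined at the top of this file (APUs route SMC accesses through the
 * MP0PUB index/data pair), installs vi_asic_funcs, derives the internal
 * and external revision ids plus the per-ASIC cg/pg flags, and caches
 * the PCIE link capabilities via amdgpu_get_pcie_info().
 */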
static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static void vi_common_print_status(void *handle)
{
	return;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}
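
/*
 * The fiji_update_*() helpers below share one pattern: read the current
 * register value, compute the gated or ungated value, and write it back
 * only when it actually changed, so repeated clockgating calls do not
 * generate redundant register writes.
 */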
static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable)
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable)
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev,
					bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable)
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void fiji_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable)
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs vi_common_ip_funcs = {
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.print_status = vi_common_print_status,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};