/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"

MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");

/*
 * Indirect registers accessor
 */
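/*
 * Each accessor below pairs an INDEX register with a DATA register:
 * the offset is written to INDEX and the value is then moved through
 * DATA, all under a dedicated spinlock so concurrent callers cannot
 * interleave the two MMIO accesses.  The discarded reads of
 * mmPCIE_INDEX/mmPCIE_DATA are posting reads that flush the writes
 * before the lock is released.
 */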
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	r = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

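/*
 * Golden register tables: flat arrays of {register, and_mask,
 * or_value} triplets in the usual amdgpu convention, handed to
 * amdgpu_program_register_sequence().  An and_mask of 0xffffffff
 * writes or_value outright; any other mask read-modify-writes the
 * register, clearing the masked bits before OR-ing in the value.
 */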
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}
/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
	/* bit0: 0 means pf and 1 means vf */
	/* bit31: 0 means disable IOV and 1 means enable */
	if (reg & 1)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

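/*
 * Whitelists of registers userspace may read, going by how
 * vi_read_register() consumes the fields: entries flagged "untouched"
 * (second field) are accepted but report 0 rather than a live value,
 * and entries with the third field set take the GRBM-indexed path in
 * vi_get_register_value().
 */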
*/ 466 adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; 467 } 468 } 469 470 static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { 471 {mmGB_MACROTILE_MODE7, true}, 472 }; 473 474 static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = { 475 {mmGB_TILE_MODE7, true}, 476 {mmGB_TILE_MODE12, true}, 477 {mmGB_TILE_MODE17, true}, 478 {mmGB_TILE_MODE23, true}, 479 {mmGB_MACROTILE_MODE7, true}, 480 }; 481 482 static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = { 483 {mmGRBM_STATUS, false}, 484 {mmGRBM_STATUS2, false}, 485 {mmGRBM_STATUS_SE0, false}, 486 {mmGRBM_STATUS_SE1, false}, 487 {mmGRBM_STATUS_SE2, false}, 488 {mmGRBM_STATUS_SE3, false}, 489 {mmSRBM_STATUS, false}, 490 {mmSRBM_STATUS2, false}, 491 {mmSRBM_STATUS3, false}, 492 {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false}, 493 {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false}, 494 {mmCP_STAT, false}, 495 {mmCP_STALLED_STAT1, false}, 496 {mmCP_STALLED_STAT2, false}, 497 {mmCP_STALLED_STAT3, false}, 498 {mmCP_CPF_BUSY_STAT, false}, 499 {mmCP_CPF_STALLED_STAT1, false}, 500 {mmCP_CPF_STATUS, false}, 501 {mmCP_CPC_BUSY_STAT, false}, 502 {mmCP_CPC_STALLED_STAT1, false}, 503 {mmCP_CPC_STATUS, false}, 504 {mmGB_ADDR_CONFIG, false}, 505 {mmMC_ARB_RAMCFG, false}, 506 {mmGB_TILE_MODE0, false}, 507 {mmGB_TILE_MODE1, false}, 508 {mmGB_TILE_MODE2, false}, 509 {mmGB_TILE_MODE3, false}, 510 {mmGB_TILE_MODE4, false}, 511 {mmGB_TILE_MODE5, false}, 512 {mmGB_TILE_MODE6, false}, 513 {mmGB_TILE_MODE7, false}, 514 {mmGB_TILE_MODE8, false}, 515 {mmGB_TILE_MODE9, false}, 516 {mmGB_TILE_MODE10, false}, 517 {mmGB_TILE_MODE11, false}, 518 {mmGB_TILE_MODE12, false}, 519 {mmGB_TILE_MODE13, false}, 520 {mmGB_TILE_MODE14, false}, 521 {mmGB_TILE_MODE15, false}, 522 {mmGB_TILE_MODE16, false}, 523 {mmGB_TILE_MODE17, false}, 524 {mmGB_TILE_MODE18, false}, 525 {mmGB_TILE_MODE19, false}, 526 {mmGB_TILE_MODE20, false}, 527 {mmGB_TILE_MODE21, false}, 528 {mmGB_TILE_MODE22, false}, 529 {mmGB_TILE_MODE23, false}, 530 {mmGB_TILE_MODE24, false}, 531 {mmGB_TILE_MODE25, false}, 532 {mmGB_TILE_MODE26, false}, 533 {mmGB_TILE_MODE27, false}, 534 {mmGB_TILE_MODE28, false}, 535 {mmGB_TILE_MODE29, false}, 536 {mmGB_TILE_MODE30, false}, 537 {mmGB_TILE_MODE31, false}, 538 {mmGB_MACROTILE_MODE0, false}, 539 {mmGB_MACROTILE_MODE1, false}, 540 {mmGB_MACROTILE_MODE2, false}, 541 {mmGB_MACROTILE_MODE3, false}, 542 {mmGB_MACROTILE_MODE4, false}, 543 {mmGB_MACROTILE_MODE5, false}, 544 {mmGB_MACROTILE_MODE6, false}, 545 {mmGB_MACROTILE_MODE7, false}, 546 {mmGB_MACROTILE_MODE8, false}, 547 {mmGB_MACROTILE_MODE9, false}, 548 {mmGB_MACROTILE_MODE10, false}, 549 {mmGB_MACROTILE_MODE11, false}, 550 {mmGB_MACROTILE_MODE12, false}, 551 {mmGB_MACROTILE_MODE13, false}, 552 {mmGB_MACROTILE_MODE14, false}, 553 {mmGB_MACROTILE_MODE15, false}, 554 {mmCC_RB_BACKEND_DISABLE, false, true}, 555 {mmGC_USER_RB_BACKEND_DISABLE, false, true}, 556 {mmGB_BACKEND_MAP, false, false}, 557 {mmPA_SC_RASTER_CONFIG, false, true}, 558 {mmPA_SC_RASTER_CONFIG_1, false, true}, 559 }; 560 561 static uint32_t vi_get_register_value(struct amdgpu_device *adev, 562 bool indexed, u32 se_num, 563 u32 sh_num, u32 reg_offset) 564 { 565 if (indexed) { 566 uint32_t val; 567 unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num; 568 unsigned sh_idx = (sh_num == 0xffffffff) ? 
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

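/*
 * Reads are validated in two passes: the per-ASIC table is consulted
 * first, so a chip-specific entry can override the common VI
 * whitelist, and an offset found in neither table is rejected with
 * -EINVAL.
 */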
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = vi_get_register_value(adev,
							       asic_register_entry->grbm_indexed,
							       se_num, sh_num, reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_get_register_value(adev,
						       vi_allowed_read_registers[i].grbm_indexed,
						       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	/* program the post divider computed by atombios */
	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	/* wait for the new divider to take effect */
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

	return r;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

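/*
 * The doorbell aperture is the BAR region through which rings are
 * kicked by a doorbell write instead of an MMIO register access.  On
 * dGPUs the BIF aperture must be enabled explicitly; APUs do not need
 * this, hence the early return below.
 */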
static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.detect_hw_virtualization = vi_detect_hw_virtualization,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
};

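/*
 * Early init runs before the vbios is usable: it installs the
 * register accessors (APUs go through the MP0PUB pair for SMC space,
 * dGPUs through SMC_IND), reads the revision id straps and derives
 * the per-ASIC clockgating and powergating feature masks consumed by
 * the rest of the driver.
 */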
static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_UVD;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	/* in early init stage, vbios code won't work */
	if (adev->asic_funcs->detect_hw_virtualization)
		amdgpu_asic_detect_hw_virtualization(adev);

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

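/*
 * The clockgating helpers below share one pattern: read the control
 * register, flip the enable bits according to the feature mask in
 * adev->cg_flags, and write back only if the value actually changed,
 * sparing a posted write on the common no-op path.
 */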
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *pp_handle = adev->powerplay.pp_handle;

	if (state == AMD_CG_STATE_UNGATE)
		pp_state = 0;
	else
		pp_state = PP_STATE_CG | PP_STATE_LS;

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_MC,
			      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_SDMA,
			      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_HDP,
			      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_BIF,
			      PP_STATE_SUPPORT_LS,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_BIF,
			      PP_STATE_SUPPORT_CG,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_DRM,
			      PP_STATE_SUPPORT_LS,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			      PP_BLOCK_SYS_ROM,
			      PP_STATE_SUPPORT_CG,
			      pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	return 0;
}

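/*
 * Tonga and Polaris route clockgating through SMU messages, while
 * Fiji, Carrizo and Stoney program the gating registers directly via
 * the helpers above; Topaz (cg_flags == 0) has nothing to toggle.
 */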
static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

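/*
 * IP blocks are registered in bring-up order: common first, then GMC,
 * IH and powerplay, followed by display (hardware DCE or the virtual
 * variant), GFX, SDMA and finally the UVD/VCE (plus ACP, where
 * configured) media blocks.
 */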
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		break;
	case CHIP_TONGA:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}