/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/slab.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

/*
 * Indirect registers accessors
 */
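
/*
 * Each accessor pair below takes the matching index spinlock, writes
 * the register offset to an index port and then accesses the data
 * port.  The function pointers are installed into the amdgpu_device
 * in vi_common_early_init() and are normally reached through wrappers
 * such as RREG32_PCIE()/RREG32_SMC().  The dummy read back of the
 * index register is presumably there to post the index write before
 * the data port is touched.
 */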

static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
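
/*
 * Each golden register table below is a flat array of
 * { register, AND mask, OR value } triplets as consumed by
 * amdgpu_device_program_register_sequence(): a mask of 0xffffffff
 * writes the value verbatim, anything else does a read-modify-write
 * of the masked bits.
 */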

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
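
/*
 * A minimal sketch of the usual caller pattern (the real users live
 * in the gfx/compute code, e.g. gfx_v8_0.c):
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	... program the per-instance registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */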

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
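
/*
 * Read the vbios image through the SMC indirect ROM port.  ROM_INDEX
 * is programmed once and ROM_DATA then auto-increments on every read,
 * so the image can be streamed one dword at a time; the requested
 * length is rounded up to a whole number of dwords.
 */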

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};
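
/*
 * Entries flagged grbm_indexed in the table above are served either
 * from the cached per-SE/SH rb configuration or by reading the
 * register under grbm_idx_mutex with the requested SE/SH selected;
 * everything else comes from the cached gfx config or a plain
 * RREG32().
 */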

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}
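
/**
 * vi_gpu_pci_config_reset - reset the asic via PCI config space
 *
 * @adev: amdgpu_device pointer
 *
 * Disables bus mastering, triggers the reset through PCI config
 * space, then polls mmCONFIG_MEMSIZE until it reads back something
 * other than 0xffffffff, which indicates the asic is out of reset.
 * Returns 0 on success, -EINVAL if the asic never comes back.
 */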
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}
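
/*
 * BACO ("Bus Active, Chip Off") support is routed through the
 * powerplay layer: the SMU keeps the PCIe link alive while the rest
 * of the chip is powered down, so entering and immediately leaving
 * BACO acts as a full asic reset.
 */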
int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
		*cap = false;
		return -ENOENT;
	}

	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
}

int smu7_asic_baco_reset(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_state ||
	    !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* enter BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
		return -EIO;

	/* exit BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
		return -EIO;

	dev_info(adev->dev, "GPU BACO reset\n");

	return 0;
}

/**
 * vi_asic_pci_config_reset - reset the GPU via PCI config space
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static bool vi_asic_supports_baco(struct amdgpu_device *adev)
{
	bool baco_support;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		smu7_asic_get_baco_capability(adev, &baco_support);
		break;
	default:
		baco_support = false;
		break;
	}

	return baco_support;
}

static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		smu7_asic_get_baco_capability(adev, &baco_reset);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}

/**
 * vi_asic_reset - asic reset
 *
 * @adev: amdgpu_device pointer
 *
 * Attempt a full asic reset, using BACO where it is supported
 * and falling back to the PCI config method otherwise.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		r = smu7_asic_baco_reset(adev);
	} else {
		r = vi_asic_pci_config_reset(adev);
	}

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}
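
/*
 * UVD/VCE clocks are set by asking atombios for a post divider that
 * yields the requested frequency, writing that divider into the
 * relevant CNTL register and then polling the matching status
 * register until the new divider has taken effect (100 * 10ms
 * timeout).  APUs use the GNB_CLK* registers defined below instead
 * of the CG_* ones.
 */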
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
			 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}
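
/*
 * HDP flush/invalidate either goes straight through MMIO (when no
 * ring is given, or the ring cannot emit register writes) or is
 * emitted on the ring so that it is ordered with the rest of the
 * submission.
 */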
static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e the number of replays */
	return (nak_r + nak_g);
}

static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}
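
/*
 * One asic_funcs vtable is shared by every VI variant; the per-asic
 * differences are handled inside the callbacks themselves via
 * AMD_IS_APU checks and asic_type switches.
 */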
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
	.supports_baco = &vi_asic_supports_baco,
};

#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
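
/*
 * Early init installs the indirect register accessors (APUs use the
 * MP0PUB port for SMC registers, dGPUs the SMC_IND port) and derives
 * the clock/power gating feature masks and the external revision id
 * reported to userspace for each asic.
 */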
static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
			/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
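
/*
 * Under SR-IOV the host mailbox interrupt is wired up across the init
 * stages: the irq id is added in sw_init, the irq itself is requested
 * in late_init and released again in hw_fini.
 */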
static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
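
/*
 * On the SMU-managed parts clockgating is requested by message: for
 * each block the group, block id, the supported gating flavours (LS
 * and/or CG) and the requested state are packed into a PP_CG_MSG_ID()
 * and handed to the SMU via amdgpu_dpm_set_clockgating_by_smu().
 */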
static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}
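
/*
 * Fiji, Carrizo and Stoney toggle the gating registers directly with
 * the helpers above; Tonga, Polaris and VegaM route everything
 * through the SMU message interface instead.
 */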
static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
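
/*
 * IP blocks are added in init order: common, GMC, IH, GFX, SDMA, SMU,
 * display (virtual display, DC or DCE) and finally the multimedia
 * blocks.  Under SR-IOV (Tonga/Fiji) the virtual display is used and
 * UVD/VCE are not added.
 */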
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
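
/**
 * legacy_doorbell_index_init - init doorbell index values
 *
 * @adev: amdgpu_device pointer
 *
 * Assigns the fixed legacy (pre-vega) doorbell slots for the KIQ,
 * the MEC and GFX rings, the SDMA engines and the IH, and records
 * the highest assigned index.
 */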
void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}