/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "amdgpu_vkms.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

#define ixPCIE_LC_L1_PM_SUBSTATE	0x100100C6
#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK	0x00000001L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK	0x00000002L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK	0x00000004L
#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK	0x00000008L
#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK	0x00000010L
#define ixPCIE_L1_PM_SUB_CNTL	0x378
#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK	0x00000004L
#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK	0x00000008L
#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK	0x00000001L
#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK	0x00000002L
#define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK	0x00200000L
#define LINK_CAP	0x64
#define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK	0x00040000L
#define ixCPM_CONTROL	0x1400118
#define ixPCIE_LC_CNTL7	0x100100BC
#define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK	0x00000400L
#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT	0x00000007
#define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT	0x00000009
#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK	0x01000000L
#define PCIE_L1_PM_SUB_CNTL	0x378
#define ASIC_IS_P22(asic_type, rid)	((asic_type >= CHIP_POLARIS10) && \
					 (asic_type <= CHIP_POLARIS12) && \
					 (rid >= 0x6E))
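
/*
 * Per-ASIC video codec capability tables below are reported to userspace
 * through the VIDEO_CAPS query of the AMDGPU_INFO ioctl (see
 * vi_query_video_codecs()).
 */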

/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_encode =
{
	.codec_count = 0,
	.codec_array = NULL,
};

/* Tonga, CZ, ST, Fiji */
static const struct amdgpu_video_codec_info tonga_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs tonga_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(tonga_video_codecs_encode_array),
	.codec_array = tonga_video_codecs_encode_array,
};

/* Polaris */
static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs polaris_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(polaris_video_codecs_encode_array),
	.codec_array = polaris_video_codecs_encode_array,
};

/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_decode =
{
	.codec_count = 0,
	.codec_array = NULL,
};

/* Tonga */
static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 1920,
		.max_height = 1088,
		.max_pixels_per_frame = 1920 * 1088,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 1920,
		.max_height = 1088,
		.max_pixels_per_frame = 1920 * 1088,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 1920,
		.max_height = 1088,
		.max_pixels_per_frame = 1920 * 1088,
		.max_level = 4,
	},
};

static const struct amdgpu_video_codecs tonga_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(tonga_video_codecs_decode_array),
	.codec_array = tonga_video_codecs_decode_array,
};

/* CZ, ST, Fiji, Polaris */
static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 1920,
		.max_height = 1088,
		.max_pixels_per_frame = 1920 * 1088,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 1920,
		.max_height = 1088,
		.max_pixels_per_frame = 1920 * 1088,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 1920,
		.max_height = 1088,
		.max_pixels_per_frame = 1920 * 1088,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs cz_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(cz_video_codecs_decode_array),
	.codec_array = cz_video_codecs_decode_array,
};

static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode,
				 const struct amdgpu_video_codecs **codecs)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		if (encode)
			*codecs = &topaz_video_codecs_encode;
		else
			*codecs = &topaz_video_codecs_decode;
		return 0;
	case CHIP_TONGA:
		if (encode)
			*codecs = &tonga_video_codecs_encode;
		else
			*codecs = &tonga_video_codecs_decode;
		return 0;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		if (encode)
			*codecs = &polaris_video_codecs_encode;
		else
			*codecs = &cz_video_codecs_decode;
		return 0;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (encode)
			*codecs = &tonga_video_codecs_encode;
		else
			*codecs = &cz_video_codecs_decode;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Indirect register accessors
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
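
/*
 * APUs route SMC register access through the MP0PUB index/data pair
 * (cz_smc_rreg()/cz_smc_wreg() below); dGPUs use SMC_IND_INDEX_11/DATA_11.
 * vi_common_early_init() installs the right pair for the chip.
 */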

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
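
/*
 * Golden register sequences, consumed by
 * amdgpu_device_program_register_sequence() as {offset, AND mask, OR value}
 * triples; an AND mask of 0xffffffff writes the value directly instead of
 * doing a read-modify-write.
 */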
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}
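
/* Clock values in the helpers below are in 10 kHz units (10000 == 100 MHz). */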
/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU) {
		switch (adev->asic_type) {
		case CHIP_STONEY:
			/* vbios says 48 MHz, but the actual freq is 100 MHz */
			return 10000;
		default:
			return reference_clock;
		}
	}

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}
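
/*
 * Registers userspace is allowed to read through the READ_MMR_REG info
 * query; entries marked true are instanced per SE/SH and are resolved
 * through the GRBM index (see vi_get_register_value()).
 */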
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};
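
/*
 * GFX config registers captured at init time are served from
 * adev->gfx.config here instead of being read back from hardware.
 */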
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;
	int r = -EINVAL;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			r = 0;
			break;
		}
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}
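
/* BACO (Bus Alive, Chip Off) reset is carried out by the SMU via the DPM layer. */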
static int vi_asic_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		return amdgpu_dpm_is_baco_supported(adev);
	default:
		return 0;
	}
}

static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	int baco_reset;

	if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	default:
		baco_reset = 0;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	/* APUs don't have full asic reset */
	if (adev->flags & AMD_IS_APU)
		return 0;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		dev_info(adev->dev, "BACO reset\n");
		r = amdgpu_dpm_baco_reset(adev);
	} else {
		dev_info(adev->dev, "PCI CONFIG reset\n");
		r = vi_asic_pci_config_reset(adev);
	}

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
			 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}
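
/*
 * On APUs the UVD/VCE clocks are driven through the GNB DFS controls below;
 * dGPUs use the CG_VCLK/CG_DCLK/CG_ECLK PLL registers instead.
 */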
#define ixGNB_CLK1_DFS_CNTL	0xD82200F0
#define ixGNB_CLK1_STATUS	0xD822010C
#define ixGNB_CLK2_DFS_CNTL	0xD8220110
#define ixGNB_CLK2_STATUS	0xD822012C
#define ixGNB_CLK3_DFS_CNTL	0xD8220130
#define ixGNB_CLK3_STATUS	0xD822014C

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_enable_aspm(struct amdgpu_device *adev)
{
	u32 data, orig;

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
	data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT <<
		PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
	data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT <<
		PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
	data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL, data);
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	u32 data, data1, orig;
	bool bL1SS = false;
	bool bClkReqSupport = true;

	if (!amdgpu_device_should_use_aspm(adev))
		return;

	if (adev->asic_type < CHIP_POLARIS10)
		return;

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
	data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
	data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT;
	data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
	data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL3, data);

	orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
	data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_P_CNTL, data);

	data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE);
	pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, &data1);
	if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK &&
	    (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK |
		     PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK |
		     PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK |
		     PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) {
		bL1SS = true;
	} else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK |
			    PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK |
			    PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK |
			    PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) {
		bL1SS = true;
	}

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6);
	data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL6, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
	data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);

	pci_read_config_dword(adev->pdev, LINK_CAP, &data);
	if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK))
		bClkReqSupport = false;

	if (bClkReqSupport) {
		orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
		data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
		data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
			(1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
		if (orig != data)
			WREG32_SMC(ixTHM_CLK_CNTL, data);

		orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
		data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
			  MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK);
		data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
			(1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
		data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT);
		if (orig != data)
			WREG32_SMC(ixMISC_CLK_CTRL, data);

		orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
		data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK;
		if (orig != data)
			WREG32_SMC(ixCG_CLKPIN_CNTL, data);

		orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
		data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK;
		if (orig != data)
			WREG32_SMC(ixCG_CLKPIN_CNTL_2, data);

		orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
		data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
		data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
		if (orig != data)
			WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);

		orig = data = RREG32_PCIE(ixCPM_CONTROL);
		data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK |
			 CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK);
		if (orig != data)
			WREG32_PCIE(ixCPM_CONTROL, data);

		orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL);
		data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK;
		data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT);
		if (orig != data)
			WREG32_PCIE(ixPCIE_CONFIG_CNTL, data);

		orig = data = RREG32(mmBIF_CLK_CTRL);
		data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK;
		if (orig != data)
			WREG32(mmBIF_CLK_CTRL, data);

		orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7);
		data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL7, data);

		orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG);
		data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_HW_DEBUG, data);

		orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
		data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
		data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
		if (bL1SS)
			data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL2, data);
	}

	vi_enable_aspm(adev);

	data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
	data1 = RREG32_PCIE(ixPCIE_LC_STATUS1);
	if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) &&
	    data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK &&
	    data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) {
		orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
		data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL, data);
	}

	if ((adev->asic_type == CHIP_POLARIS12 &&
	     !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) ||
	    ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) {
		orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL);
		data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data);
	}
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
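
/*
 * The ATI revision ID comes from a fuse macro read through the SMC address
 * space on APUs, and from the PCIE_EFUSE4 strap on dGPUs.
 */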
#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
	.supports_baco = &vi_asic_supports_baco,
	.query_video_codecs = &vi_query_video_codecs,
};
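
/* Bristol Ridge parts are Carrizo-based and are identified by PCI revision ID. */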
#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

static int vi_common_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
			/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int vi_common_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(struct amdgpu_ip_block *ip_block)
{
	return vi_common_hw_fini(ip_block);
}

static int vi_common_resume(struct amdgpu_ip_block *ip_block)
{
	return vi_common_hw_init(ip_block);
}

static bool vi_common_is_idle(struct amdgpu_ip_block *ip_block)
{
	return true;
}
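
/*
 * The clockgating helpers below read-modify-write their control registers
 * and skip the write when nothing actually changed.
 */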
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
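
/*
 * On Tonga/Polaris the system blocks (MC, SDMA, HDP, BIF, DRM, ROM) are
 * clock-gated by the SMU; the driver only sends PP_CG_MSG_ID requests
 * describing the supported and desired gating state.
 */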
static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
	struct amdgpu_device *adev = ip_block->adev;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

void vi_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_vi_virt_ops;
}
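
/*
 * amdgpu initializes IP blocks in the order they are added, so every
 * case below follows the same dependency order: the common block first,
 * then GMC and the interrupt handler (which everything else needs),
 * then GFX/SDMA/SMU, with display and multimedia (UVD/VCE, plus ACP on
 * the Carrizo/Stoney APUs) last.
 */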

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
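
/**
 * legacy_doorbell_index_init - init doorbell indices
 * @adev: amdgpu_device pointer
 *
 * Assign the fixed pre-SOC15 doorbell slots used by VI parts: KIQ, the
 * eight MEC compute rings, the first gfx ring, both SDMA engines and
 * the IH ring, plus the highest index the legacy layout can hand out.
 */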
void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}
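
/*
 * These indices are consumed when the IP blocks added in
 * vi_set_ip_blocks() create their rings; gfx_v8_0, for instance, takes
 * its gfx and compute doorbells from gfx_ring0 and the mec_ring* slots.
 * This helper is expected to be wired up as the ASIC's
 * .init_doorbell_index callback so that it runs before any ring setup.
 */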