/*
 * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
#include "amdgpu_ras.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_3.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "df_v4_6_2.h"
#include "df_v4_15.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "nbio_v7_9.h"
#include "nbio_v7_11.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gmc_v12_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "nbif_v6_3_1.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "hdp_v7_0.h"
#include "nv.h"
#include "soc21.h"
#include "soc24.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
#include "ih_v7_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "gfx_v12_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "sdma_v7_0.h"
#include "lsdma_v6_0.h"
#include "lsdma_v7_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "vcn_v4_0_3.h"
#include "jpeg_v4_0_3.h"
#include "vcn_v4_0_5.h"
#include "jpeg_v4_0_5.h"
#include "amdgpu_vkms.h"
#include "mes_v11_0.h"
#include "mes_v12_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
#include "smuio_v14_0_2.h"
#include "vcn_v5_0_0.h"
#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
#include "jpeg_v5_0_1.h"

#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif

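/*
 * IP discovery: read the discovery binary produced by the IFWI (from the
 * reserved VRAM TMR, from system memory, or from a sideloaded firmware
 * file), validate its tables, and use it to populate per-IP register
 * offsets, IP version numbers, harvest configuration, and the ip_discovery
 * sysfs hierarchy.
 */
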
MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");

#define mmIP_DISCOVERY_VERSION	0x16A00
#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMP0_SMN_C2PMSG_33	0x16061
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID] = "MP1",
	[MP2_HWID] = "MP2",
	[THM_HWID] = "THM",
	[SMUIO_HWID] = "SMUIO",
	[FUSE_HWID] = "FUSE",
	[CLKA_HWID] = "CLKA",
	[PWR_HWID] = "PWR",
	[GC_HWID] = "GC",
	[UVD_HWID] = "UVD",
	[AUDIO_AZ_HWID] = "AUDIO_AZ",
	[ACP_HWID] = "ACP",
	[DCI_HWID] = "DCI",
	[DMU_HWID] = "DMU",
	[DCO_HWID] = "DCO",
	[DIO_HWID] = "DIO",
	[XDMA_HWID] = "XDMA",
	[DCEAZ_HWID] = "DCEAZ",
	[DAZ_HWID] = "DAZ",
	[SDPMUX_HWID] = "SDPMUX",
	[NTB_HWID] = "NTB",
	[IOHC_HWID] = "IOHC",
	[L2IMU_HWID] = "L2IMU",
	[VCE_HWID] = "VCE",
	[MMHUB_HWID] = "MMHUB",
	[ATHUB_HWID] = "ATHUB",
	[DBGU_NBIO_HWID] = "DBGU_NBIO",
	[DFX_HWID] = "DFX",
	[DBGU0_HWID] = "DBGU0",
	[DBGU1_HWID] = "DBGU1",
	[OSSSYS_HWID] = "OSSSYS",
	[HDP_HWID] = "HDP",
	[SDMA0_HWID] = "SDMA0",
	[SDMA1_HWID] = "SDMA1",
	[SDMA2_HWID] = "SDMA2",
	[SDMA3_HWID] = "SDMA3",
	[LSDMA_HWID] = "LSDMA",
	[ISP_HWID] = "ISP",
	[DBGU_IO_HWID] = "DBGU_IO",
	[DF_HWID] = "DF",
	[CLKB_HWID] = "CLKB",
	[FCH_HWID] = "FCH",
	[DFX_DAP_HWID] = "DFX_DAP",
	[L1IMU_PCIE_HWID] = "L1IMU_PCIE",
	[L1IMU_NBIF_HWID] = "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
	[L1IMU3_HWID] = "L1IMU3",
	[L1IMU4_HWID] = "L1IMU4",
	[L1IMU5_HWID] = "L1IMU5",
	[L1IMU6_HWID] = "L1IMU6",
	[L1IMU7_HWID] = "L1IMU7",
	[L1IMU8_HWID] = "L1IMU8",
	[L1IMU9_HWID] = "L1IMU9",
	[L1IMU10_HWID] = "L1IMU10",
	[L1IMU11_HWID] = "L1IMU11",
	[L1IMU12_HWID] = "L1IMU12",
	[L1IMU13_HWID] = "L1IMU13",
	[L1IMU14_HWID] = "L1IMU14",
	[L1IMU15_HWID] = "L1IMU15",
	[WAFLC_HWID] = "WAFLC",
	[FCH_USB_PD_HWID] = "FCH_USB_PD",
	[PCIE_HWID] = "PCIE",
	[PCS_HWID] = "PCS",
	[DDCL_HWID] = "DDCL",
	[SST_HWID] = "SST",
	[IOAGR_HWID] = "IOAGR",
	[NBIF_HWID] = "NBIF",
	[IOAPIC_HWID] = "IOAPIC",
	[SYSTEMHUB_HWID] = "SYSTEMHUB",
	[NTBCCP_HWID] = "NTBCCP",
	[UMC_HWID] = "UMC",
	[SATA_HWID] = "SATA",
	[USB_HWID] = "USB",
	[CCXSEC_HWID] = "CCXSEC",
	[XGMI_HWID] = "XGMI",
	[XGBE_HWID] = "XGBE",
	[MP0_HWID] = "MP0",
	[VPE_HWID] = "VPE",
};

static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP] = GC_HWID,
	[HDP_HWIP] = HDP_HWID,
	[SDMA0_HWIP] = SDMA0_HWID,
	[SDMA1_HWIP] = SDMA1_HWID,
	[SDMA2_HWIP] = SDMA2_HWID,
	[SDMA3_HWIP] = SDMA3_HWID,
	[LSDMA_HWIP] = LSDMA_HWID,
	[MMHUB_HWIP] = MMHUB_HWID,
	[ATHUB_HWIP] = ATHUB_HWID,
	[NBIO_HWIP] = NBIF_HWID,
	[MP0_HWIP] = MP0_HWID,
	[MP1_HWIP] = MP1_HWID,
	[UVD_HWIP] = UVD_HWID,
	[VCE_HWIP] = VCE_HWID,
	[DF_HWIP] = DF_HWID,
	[DCE_HWIP] = DMU_HWID,
	[OSSSYS_HWIP] = OSSSYS_HWID,
	[SMUIO_HWIP] = SMUIO_HWID,
	[PWR_HWIP] = PWR_HWID,
	[NBIF_HWIP] = NBIF_HWID,
	[THM_HWIP] = THM_HWID,
	[CLK_HWIP] = CLKA_HWID,
	[UMC_HWIP] = UMC_HWID,
	[XGMI_HWIP] = XGMI_HWID,
	[DCI_HWIP] = DCI_HWID,
	[PCIE_HWIP] = PCIE_HWID,
	[VPE_HWIP] = VPE_HWID,
	[ISP_HWIP] = ISP_HWID,
};

static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
{
	u64 tmr_offset, tmr_size, pos;
	void *discv_regn;
	int ret;

	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
	if (ret)
		return ret;

	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;

	/* This region is read-only and reserved from system use */
	discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC);
	if (discv_regn) {
		memcpy(binary, discv_regn, adev->discovery.size);
		memunmap(discv_regn);
		return 0;
	}

	return -ENOENT;
}

#define IP_DISCOVERY_V2 2
#define IP_DISCOVERY_V4 4

static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
{
	bool sz_valid = true;
	uint64_t vram_size;
	int i, ret = 0;
	u32 msg;

	if (!amdgpu_sriov_vf(adev)) {
		/* It can take up to two seconds for IFWI init to complete on some dGPUs,
		 * but generally it should be in the 60-100ms range. Normally this starts
		 * as soon as the device gets power, so by the time the OS loads this has long
		 * completed. However, when a card is hotplugged via e.g., USB4, we need to
		 * wait for this to complete. Once the C2PMSG is updated, we can
		 * continue.
		 */

		for (i = 0; i < 2000; i++) {
			msg = RREG32(mmMP0_SMN_C2PMSG_33);
			if (msg & 0x80000000)
				break;
			msleep(1);
		}
	}

	vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
	if (!vram_size || vram_size == U32_MAX)
		sz_valid = false;
	else
		vram_size <<= 20;

	/*
	 * If in VRAM, discovery TMR is marked for reservation. If it is in system mem,
	 * then it is not required to be reserved.
	 */
	if (sz_valid) {
		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
					  adev->discovery.size, false);
		adev->discovery.reserve_tmr = true;
	} else {
		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
	}

	if (ret)
		dev_err(adev->dev,
			"failed to read discovery info from memory, vram size read: %llx",
			vram_size);

	return ret;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
						  uint8_t *binary,
						  const char *fw_name)
{
	const struct firmware *fw;
	int r;

	r = firmware_request_nowarn(&fw, fw_name, adev->dev);
	if (r) {
		if (amdgpu_discovery == 2)
			dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
		else
			drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
	release_firmware(fw);

	return 0;
}

static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;
	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table of VCN config.
	 */
	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
					   struct binary_header *bhdr)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct table_info *info;
	uint16_t checksum;
	uint16_t offset;

	info = &bhdr->table_list[NPS_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	struct nps_info_header *nhdr =
		(struct nps_info_header *)(discovery_bin + offset);

	if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
		dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
		return -EINVAL;
	}

	if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
					      le32_to_cpu(nhdr->size_bytes),
					      checksum)) {
		dev_dbg(adev->dev, "invalid nps info data table checksum\n");
		return -EINVAL;
	}

	return 0;
}

static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
{
	if (amdgpu_discovery == 2) {
		/* Assume there is valid discovery TMR in VRAM even if binary is sideloaded */
		adev->discovery.reserve_tmr = true;
		return "amdgpu/ip_discovery.bin";
	}

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		return "amdgpu/vega10_ip_discovery.bin";
	case CHIP_VEGA12:
		return "amdgpu/vega12_ip_discovery.bin";
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			return "amdgpu/raven2_ip_discovery.bin";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			return "amdgpu/picasso_ip_discovery.bin";
		else
			return "amdgpu/raven_ip_discovery.bin";
	case CHIP_VEGA20:
		return "amdgpu/vega20_ip_discovery.bin";
	case CHIP_ARCTURUS:
		return "amdgpu/arcturus_ip_discovery.bin";
	case CHIP_ALDEBARAN:
		return "amdgpu/aldebaran_ip_discovery.bin";
	default:
		return NULL;
	}
}

static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint8_t *discovery_bin;
	const char *fw_name;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
	if (!adev->discovery.bin)
		return -ENOMEM;
	adev->discovery.size = DISCOVERY_TMR_SIZE;
	adev->discovery.debugfs_blob.data = adev->discovery.bin;
	adev->discovery.debugfs_blob.size = adev->discovery.size;

	discovery_bin = adev->discovery.bin;
	/* Read from file if it is the preferred option */
	fw_name = amdgpu_discovery_get_fw_name(adev);
	if (fw_name != NULL) {
		drm_dbg(&adev->ddev, "use ip discovery information from file");
		r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin,
							   fw_name);
		if (r)
			goto out;
	} else {
		drm_dbg(&adev->ddev, "use ip discovery information from memory");
		r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin);
		if (r)
			goto out;
	}

	/* check the ip discovery binary signature */
	if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) {
		dev_err(adev->dev,
			"get invalid ip discovery binary signature\n");
		r = -EINVAL;
		goto out;
	}

	bhdr = (struct binary_header *)discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size,
					      checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(discovery_bin + offset);
		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
						      le16_to_cpu(ihdr->size),
						      checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
						      le32_to_cpu(ghdr->size),
						      checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(
			    discovery_bin + offset,
			    sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(
			    discovery_bin + offset,
			    le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(
			    discovery_bin + offset,
			    le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->discovery.bin);
	adev->discovery.bin = NULL;
	if ((amdgpu_discovery != 2) &&
	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
		amdgpu_ras_query_boot_status(adev, 4);
	return r;
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->discovery.bin);
	adev->discovery.bin = NULL;
}

static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
					uint8_t instance, uint16_t hw_id)
{
	if (instance >= HWIP_MAX_INSTANCE) {
		dev_err(adev->dev,
			"Unexpected instance_number (%d) from ip discovery blob\n",
			instance);
		return -EINVAL;
	}
	if (hw_id >= HW_ID_MAX) {
		dev_err(adev->dev,
			"Unexpected hw_id (%d) from ip discovery blob\n",
			hw_id);
		return -EINVAL;
	}

	return 0;
}

static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	uint16_t hw_id;
	uint8_t inst;
	int i, j;

	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header *)(discovery_bin +
	       le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(discovery_bin + ip_offset);
			inst = ip->number_instance;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
				goto next_ip;

			if (ip->harvest == 1) {
				switch (hw_id) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (inst == 0) {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
					} else {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
					}
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			ip_offset += struct_size(ip, base_address,
						 ip->num_base_address);
		}
	}
}

static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;
	uint32_t umc_harvest_config = 0;

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			adev->vcn.harvest_config |=
				(1 << harvest_info->list[i].number_instance);
			adev->jpeg.harvest_config |=
				(1 << harvest_info->list[i].number_instance);

			adev->vcn.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			adev->jpeg.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			umc_harvest_config |=
				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
			(*umc_harvest_count)++;
			break;
		case GC_HWID:
			adev->gfx.xcc_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case SDMA0_HWID:
			adev->sdma.sdma_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
#if defined(CONFIG_DRM_AMD_ISP)
		case ISP_HWID:
			adev->isp.harvest_config |=
				~(1U << harvest_info->list[i].number_instance);
			break;
#endif
		default:
			break;
		}
	}

	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
				~umc_harvest_config;
}

/* ================================================== */

struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8 num_instance;
	u8 major, minor, revision;
	u8 harvest;

	int num_base_addresses;
	u32 base_addr[] __counted_by(num_base_addresses);
};

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that, at + size <= PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static const struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static const struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static const struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static const struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static const struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	kfree(ip_top);
	adev->discovery.ip_top = NULL;
}

static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
{
	uint8_t harvest = 0;

	/* Until a uniform way is figured, get mask based on hwid */
	switch (hw_id) {
	case VCN_HWID:
		/* VCN vs UVD+VCE */
		if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
			harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
		break;
	case DMU_HWID:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
			harvest = 0x1;
		break;
	case UMC_HWID:
		/* TODO: It needs another parsing; for now, ignore.*/
		break;
	case GC_HWID:
		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
		break;
	case SDMA0_HWID:
		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
		break;
	default:
		break;
	}

	return harvest;
}

static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	int ii, jj, kk, res;
	uint16_t hw_id;
	uint8_t inst;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip_v4 *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip_v4 *)(discovery_bin + ip_offset);
			inst = ip->instance_number;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
			    hw_id != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->instance_number;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest =
				amdgpu_discovery_get_harvest_info(
					adev, ip_hw_instance->hw_id,
					ip_hw_instance->num_instance);
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
				if (reg_base_64)
					ip_hw_instance->base_addr[kk] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
				else
					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
			}

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			if (reg_base_64)
				ip_offset += struct_size(ip, base_address_64,
							 ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address,
							 ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct ip_discovery_top *ip_top = adev->discovery.ip_top;
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header *)(discovery_bin +
	       le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
	}

	return 0;
}

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct ip_discovery_top *ip_top;
	struct kset *die_kset;
	int res, ii;

	if (!discovery_bin)
		return -EINVAL;

	ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL);
	if (!ip_top)
		return -ENOMEM;

	ip_top->adev = adev;
	adev->discovery.ip_top = ip_top;
	res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct ip_discovery_top *ip_top = adev->discovery.ip_top;
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&ip_top->die_kset.kobj);
	kobject_put(&ip_top->kobj);
}

/* ================================================== */

static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	uint8_t num_base_address, subrev, variant;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	uint8_t *discovery_bin;
	struct ip_v4 *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint32_t wafl_ver;
	uint16_t num_ips;
	uint16_t hw_id;
	uint8_t inst;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r)
		return r;
	discovery_bin = adev->discovery.bin;
	wafl_ver = 0;
	adev->gfx.xcc_mask = 0;
	adev->sdma.sdma_mask = 0;
	adev->vcn.inst_mask = 0;
	adev->jpeg.inst_mask = 0;
	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header *)(discovery_bin +
	       le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
				  le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
			  le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(discovery_bin + ip_offset);

			inst = ip->instance_number;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->instance_number,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				if (adev->vcn.num_vcn_inst <
				    AMDGPU_MAX_VCN_INSTANCES) {
					adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
						ip->revision & 0xc0;
					adev->vcn.num_vcn_inst++;
					adev->vcn.inst_mask |=
						(1U << ip->instance_number);
					adev->jpeg.inst_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
				}
				ip->revision &= ~0xc0;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances <
				    AMDGPU_MAX_SDMA_INSTANCES) {
					adev->sdma.num_instances++;
					adev->sdma.sdma_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
				}
			}

			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
					adev->vpe.num_instances++;
				else
					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
						adev->vpe.num_instances + 1,
						AMDGPU_MAX_VPE_INSTANCES);
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
				adev->gmc.num_umc++;
				adev->umc.node_inst_num++;
			}

			if (le16_to_cpu(ip->hw_id) == GC_HWID)
				adev->gfx.xcc_mask |=
					(1U << ip->instance_number);

			if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
				wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
							   ip->revision, 0, 0);

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				if (ihdr->base_addr_64_bit)
					/* Truncate the 64bit base address from ip discovery
					 * and only store the lower 32bit ip base in reg_offset[].
					 * Bits > 32 follow an ASIC specific format, thus just
					 * discard them and handle it within the specific ASIC.
					 * This way reg_offset[] and related helpers can
					 * stay unchanged.
					 * The base address is in dwords, thus clear the
					 * highest 2 bits to store.
					 */
					ip->base_address[k] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
				else
					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
				    hw_id_map[hw_ip] != 0) {
					DRM_DEBUG("set register base offset for %s\n",
						  hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->instance_number] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example. Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
					 * example. On most chips there are multiple instances
					 * with the same HWID.
					 */

					if (ihdr->version < 3) {
						subrev = 0;
						variant = 0;
					} else {
						subrev = ip->sub_revision;
						variant = ip->variant;
					}

					adev->ip_versions[hw_ip]
							 [ip->instance_number] =
						IP_VERSION_FULL(ip->major,
								ip->minor,
								ip->revision,
								variant,
								subrev);
				}
			}

next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
		adev->ip_versions[XGMI_HWIP][0] = wafl_ver;

	return 0;
}

static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct ip_discovery_header *ihdr;
	struct binary_header *bhdr;
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;
	uint16_t offset, ihdr_ver;

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
	ihdr = (struct ip_discovery_header *)(discovery_bin + offset);
	ihdr_ver = le16_to_cpu(ihdr->version);
	/*
	 * Harvest table does not fit Navi1x and legacy GPUs,
	 * so read harvest bit per IP data structure to set
	 * harvest configuration.
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
	    ihdr_ver <= 2) {
		if ((adev->pdev->device == 0x731E &&
		     (adev->pdev->revision == 0xC6 ||
		      adev->pdev->revision == 0xC7)) ||
		    (adev->pdev->device == 0x7340 &&
		     adev->pdev->revision == 0xC9) ||
		    (adev->pdev->device == 0x7360 &&
		     adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
								 &vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc) {
		adev->gmc.num_umc -= umc_harvest_count;
	}
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v1_3 v1_3;
	struct gc_info_v2_0 v2;
	struct gc_info_v2_1 v2_1;
};

static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
						 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
			adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
			adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
			adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
			adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
						 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union mall_info {
	struct mall_info_v1_0 v1;
	struct mall_info_v2_0 v2;
};

static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		adev->gmc.m_half_use = half_use;
		break;
	case 2:
		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union vcn_info {
	struct vcn_info_v1_0 v1;
};

static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union vcn_info *vcn_info;
	u16 offset;
	int v;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
	 * but that may change in the future with new GPUs so keep this
	 * check for defensive purposes.
	 */
	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
		dev_err(adev->dev, "invalid vcn instances\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);

	if (!offset)
		return 0;

	vcn_info = (union vcn_info *)(discovery_bin + offset);

	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
	case 1:
		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
		 * so this won't overflow.
		 */
		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
			adev->vcn.inst[v].vcn_codec_disable_mask =
				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled VCN info table %d.%d\n",
			le16_to_cpu(vcn_info->v1.header.version_major),
			le16_to_cpu(vcn_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union nps_info {
	struct nps_info_v1_0 v1;
};

static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
					     union nps_info *nps_data)
{
	uint64_t vram_size, pos, offset;
	struct nps_info_header *nhdr;
	struct binary_header bhdr;
	uint16_t checksum;

	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	pos = vram_size - DISCOVERY_TMR_OFFSET;
	amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);

	offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
	checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);

	amdgpu_device_vram_access(adev, (pos + offset), nps_data,
				  sizeof(*nps_data), false);

	nhdr = (struct nps_info_header *)(nps_data);
	if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
					      le32_to_cpu(nhdr->size_bytes),
					      checksum)) {
		dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
		return -EINVAL;
	}

	return 0;
}

int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
				  uint32_t *nps_type,
				  struct amdgpu_gmc_memrange **ranges,
				  int *range_cnt, bool refresh)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct amdgpu_gmc_memrange *mem_ranges;
	struct binary_header *bhdr;
	union nps_info *nps_info;
	union nps_info nps_data;
	u16 offset;
	int i, r;

	if (!nps_type || !range_cnt || !ranges)
		return -EINVAL;

	if (refresh) {
		r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
		if (r)
1874 return r; 1875 nps_info = &nps_data; 1876 } else { 1877 if (!discovery_bin) { 1878 dev_err(adev->dev, 1879 "fetch mem range failed, ip discovery uninitialized\n"); 1880 return -EINVAL; 1881 } 1882 1883 bhdr = (struct binary_header *)discovery_bin; 1884 offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset); 1885 1886 if (!offset) 1887 return -ENOENT; 1888 1889 /* If verification fails, return as if NPS table doesn't exist */ 1890 if (amdgpu_discovery_verify_npsinfo(adev, bhdr)) 1891 return -ENOENT; 1892 1893 nps_info = (union nps_info *)(discovery_bin + offset); 1894 } 1895 1896 switch (le16_to_cpu(nps_info->v1.header.version_major)) { 1897 case 1: 1898 mem_ranges = kvcalloc(nps_info->v1.count, 1899 sizeof(*mem_ranges), 1900 GFP_KERNEL); 1901 if (!mem_ranges) 1902 return -ENOMEM; 1903 *nps_type = nps_info->v1.nps_type; 1904 *range_cnt = nps_info->v1.count; 1905 for (i = 0; i < *range_cnt; i++) { 1906 mem_ranges[i].base_address = 1907 nps_info->v1.instance_info[i].base_address; 1908 mem_ranges[i].limit_address = 1909 nps_info->v1.instance_info[i].limit_address; 1910 mem_ranges[i].nid_mask = -1; 1911 mem_ranges[i].flags = 0; 1912 } 1913 *ranges = mem_ranges; 1914 break; 1915 default: 1916 dev_err(adev->dev, "Unhandled NPS info table %d.%d\n", 1917 le16_to_cpu(nps_info->v1.header.version_major), 1918 le16_to_cpu(nps_info->v1.header.version_minor)); 1919 return -EINVAL; 1920 } 1921 1922 return 0; 1923 } 1924 1925 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) 1926 { 1927 /* what IP to use for this? */ 1928 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1929 case IP_VERSION(9, 0, 1): 1930 case IP_VERSION(9, 1, 0): 1931 case IP_VERSION(9, 2, 1): 1932 case IP_VERSION(9, 2, 2): 1933 case IP_VERSION(9, 3, 0): 1934 case IP_VERSION(9, 4, 0): 1935 case IP_VERSION(9, 4, 1): 1936 case IP_VERSION(9, 4, 2): 1937 case IP_VERSION(9, 4, 3): 1938 case IP_VERSION(9, 4, 4): 1939 case IP_VERSION(9, 5, 0): 1940 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); 1941 break; 1942 case IP_VERSION(10, 1, 10): 1943 case IP_VERSION(10, 1, 1): 1944 case IP_VERSION(10, 1, 2): 1945 case IP_VERSION(10, 1, 3): 1946 case IP_VERSION(10, 1, 4): 1947 case IP_VERSION(10, 3, 0): 1948 case IP_VERSION(10, 3, 1): 1949 case IP_VERSION(10, 3, 2): 1950 case IP_VERSION(10, 3, 3): 1951 case IP_VERSION(10, 3, 4): 1952 case IP_VERSION(10, 3, 5): 1953 case IP_VERSION(10, 3, 6): 1954 case IP_VERSION(10, 3, 7): 1955 amdgpu_device_ip_block_add(adev, &nv_common_ip_block); 1956 break; 1957 case IP_VERSION(11, 0, 0): 1958 case IP_VERSION(11, 0, 1): 1959 case IP_VERSION(11, 0, 2): 1960 case IP_VERSION(11, 0, 3): 1961 case IP_VERSION(11, 0, 4): 1962 case IP_VERSION(11, 5, 0): 1963 case IP_VERSION(11, 5, 1): 1964 case IP_VERSION(11, 5, 2): 1965 case IP_VERSION(11, 5, 3): 1966 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block); 1967 break; 1968 case IP_VERSION(12, 0, 0): 1969 case IP_VERSION(12, 0, 1): 1970 amdgpu_device_ip_block_add(adev, &soc24_common_ip_block); 1971 break; 1972 default: 1973 dev_err(adev->dev, 1974 "Failed to add common ip block(GC_HWIP:0x%x)\n", 1975 amdgpu_ip_version(adev, GC_HWIP, 0)); 1976 return -EINVAL; 1977 } 1978 return 0; 1979 } 1980 1981 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) 1982 { 1983 /* use GC or MMHUB IP version */ 1984 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1985 case IP_VERSION(9, 0, 1): 1986 case IP_VERSION(9, 1, 0): 1987 case IP_VERSION(9, 2, 1): 1988 case IP_VERSION(9, 2, 2): 1989 case IP_VERSION(9, 3, 0): 1990 case 
IP_VERSION(9, 4, 0): 1991 case IP_VERSION(9, 4, 1): 1992 case IP_VERSION(9, 4, 2): 1993 case IP_VERSION(9, 4, 3): 1994 case IP_VERSION(9, 4, 4): 1995 case IP_VERSION(9, 5, 0): 1996 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 1997 break; 1998 case IP_VERSION(10, 1, 10): 1999 case IP_VERSION(10, 1, 1): 2000 case IP_VERSION(10, 1, 2): 2001 case IP_VERSION(10, 1, 3): 2002 case IP_VERSION(10, 1, 4): 2003 case IP_VERSION(10, 3, 0): 2004 case IP_VERSION(10, 3, 1): 2005 case IP_VERSION(10, 3, 2): 2006 case IP_VERSION(10, 3, 3): 2007 case IP_VERSION(10, 3, 4): 2008 case IP_VERSION(10, 3, 5): 2009 case IP_VERSION(10, 3, 6): 2010 case IP_VERSION(10, 3, 7): 2011 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); 2012 break; 2013 case IP_VERSION(11, 0, 0): 2014 case IP_VERSION(11, 0, 1): 2015 case IP_VERSION(11, 0, 2): 2016 case IP_VERSION(11, 0, 3): 2017 case IP_VERSION(11, 0, 4): 2018 case IP_VERSION(11, 5, 0): 2019 case IP_VERSION(11, 5, 1): 2020 case IP_VERSION(11, 5, 2): 2021 case IP_VERSION(11, 5, 3): 2022 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block); 2023 break; 2024 case IP_VERSION(12, 0, 0): 2025 case IP_VERSION(12, 0, 1): 2026 amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block); 2027 break; 2028 default: 2029 dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n", 2030 amdgpu_ip_version(adev, GC_HWIP, 0)); 2031 return -EINVAL; 2032 } 2033 return 0; 2034 } 2035 2036 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) 2037 { 2038 switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) { 2039 case IP_VERSION(4, 0, 0): 2040 case IP_VERSION(4, 0, 1): 2041 case IP_VERSION(4, 1, 0): 2042 case IP_VERSION(4, 1, 1): 2043 case IP_VERSION(4, 3, 0): 2044 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 2045 break; 2046 case IP_VERSION(4, 2, 0): 2047 case IP_VERSION(4, 2, 1): 2048 case IP_VERSION(4, 4, 0): 2049 case IP_VERSION(4, 4, 2): 2050 case IP_VERSION(4, 4, 5): 2051 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); 2052 break; 2053 case IP_VERSION(5, 0, 0): 2054 case IP_VERSION(5, 0, 1): 2055 case IP_VERSION(5, 0, 2): 2056 case IP_VERSION(5, 0, 3): 2057 case IP_VERSION(5, 2, 0): 2058 case IP_VERSION(5, 2, 1): 2059 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); 2060 break; 2061 case IP_VERSION(6, 0, 0): 2062 case IP_VERSION(6, 0, 1): 2063 case IP_VERSION(6, 0, 2): 2064 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block); 2065 break; 2066 case IP_VERSION(6, 1, 0): 2067 amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block); 2068 break; 2069 case IP_VERSION(7, 0, 0): 2070 amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block); 2071 break; 2072 default: 2073 dev_err(adev->dev, 2074 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", 2075 amdgpu_ip_version(adev, OSSSYS_HWIP, 0)); 2076 return -EINVAL; 2077 } 2078 return 0; 2079 } 2080 2081 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) 2082 { 2083 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 2084 case IP_VERSION(9, 0, 0): 2085 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); 2086 break; 2087 case IP_VERSION(10, 0, 0): 2088 case IP_VERSION(10, 0, 1): 2089 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); 2090 break; 2091 case IP_VERSION(11, 0, 0): 2092 case IP_VERSION(11, 0, 2): 2093 case IP_VERSION(11, 0, 4): 2094 case IP_VERSION(11, 0, 5): 2095 case IP_VERSION(11, 0, 9): 2096 case IP_VERSION(11, 0, 7): 2097 case IP_VERSION(11, 0, 11): 2098 case IP_VERSION(11, 0, 12): 2099 case IP_VERSION(11, 0, 13): 2100 case IP_VERSION(11, 5, 0): 2101 case IP_VERSION(11, 5, 
2): 2102 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); 2103 break; 2104 case IP_VERSION(11, 0, 8): 2105 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block); 2106 break; 2107 case IP_VERSION(11, 0, 3): 2108 case IP_VERSION(12, 0, 1): 2109 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); 2110 break; 2111 case IP_VERSION(13, 0, 0): 2112 case IP_VERSION(13, 0, 1): 2113 case IP_VERSION(13, 0, 2): 2114 case IP_VERSION(13, 0, 3): 2115 case IP_VERSION(13, 0, 5): 2116 case IP_VERSION(13, 0, 6): 2117 case IP_VERSION(13, 0, 7): 2118 case IP_VERSION(13, 0, 8): 2119 case IP_VERSION(13, 0, 10): 2120 case IP_VERSION(13, 0, 11): 2121 case IP_VERSION(13, 0, 12): 2122 case IP_VERSION(13, 0, 14): 2123 case IP_VERSION(14, 0, 0): 2124 case IP_VERSION(14, 0, 1): 2125 case IP_VERSION(14, 0, 4): 2126 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); 2127 break; 2128 case IP_VERSION(13, 0, 4): 2129 amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block); 2130 break; 2131 case IP_VERSION(14, 0, 2): 2132 case IP_VERSION(14, 0, 3): 2133 case IP_VERSION(14, 0, 5): 2134 amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block); 2135 break; 2136 default: 2137 dev_err(adev->dev, 2138 "Failed to add psp ip block(MP0_HWIP:0x%x)\n", 2139 amdgpu_ip_version(adev, MP0_HWIP, 0)); 2140 return -EINVAL; 2141 } 2142 return 0; 2143 } 2144 2145 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) 2146 { 2147 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 2148 case IP_VERSION(9, 0, 0): 2149 case IP_VERSION(10, 0, 0): 2150 case IP_VERSION(10, 0, 1): 2151 case IP_VERSION(11, 0, 2): 2152 if (adev->asic_type == CHIP_ARCTURUS) 2153 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 2154 else 2155 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 2156 break; 2157 case IP_VERSION(11, 0, 0): 2158 case IP_VERSION(11, 0, 5): 2159 case IP_VERSION(11, 0, 9): 2160 case IP_VERSION(11, 0, 7): 2161 case IP_VERSION(11, 0, 11): 2162 case IP_VERSION(11, 0, 12): 2163 case IP_VERSION(11, 0, 13): 2164 case IP_VERSION(11, 5, 0): 2165 case IP_VERSION(11, 5, 2): 2166 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 2167 break; 2168 case IP_VERSION(11, 0, 8): 2169 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) 2170 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 2171 break; 2172 case IP_VERSION(12, 0, 0): 2173 case IP_VERSION(12, 0, 1): 2174 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); 2175 break; 2176 case IP_VERSION(13, 0, 0): 2177 case IP_VERSION(13, 0, 1): 2178 case IP_VERSION(13, 0, 2): 2179 case IP_VERSION(13, 0, 3): 2180 case IP_VERSION(13, 0, 4): 2181 case IP_VERSION(13, 0, 5): 2182 case IP_VERSION(13, 0, 6): 2183 case IP_VERSION(13, 0, 7): 2184 case IP_VERSION(13, 0, 8): 2185 case IP_VERSION(13, 0, 10): 2186 case IP_VERSION(13, 0, 11): 2187 case IP_VERSION(13, 0, 14): 2188 case IP_VERSION(13, 0, 12): 2189 amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); 2190 break; 2191 case IP_VERSION(14, 0, 0): 2192 case IP_VERSION(14, 0, 1): 2193 case IP_VERSION(14, 0, 2): 2194 case IP_VERSION(14, 0, 3): 2195 case IP_VERSION(14, 0, 4): 2196 case IP_VERSION(14, 0, 5): 2197 amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block); 2198 break; 2199 default: 2200 dev_err(adev->dev, 2201 "Failed to add smu ip block(MP1_HWIP:0x%x)\n", 2202 amdgpu_ip_version(adev, MP1_HWIP, 0)); 2203 return -EINVAL; 2204 } 2205 return 0; 2206 } 2207 2208 #if defined(CONFIG_DRM_AMD_DC) 2209 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev) 2210 { 2211 
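	/*
	 * SR-IOV display path: mark virtual display as enabled and expose it
	 * through the VKMS IP block. The callers below pick this helper over
	 * the real DM block whenever amdgpu_sriov_vf() is true.
	 */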
amdgpu_device_set_sriov_virtual_display(adev); 2212 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 2213 } 2214 #endif 2215 2216 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) 2217 { 2218 if (adev->enable_virtual_display) { 2219 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 2220 return 0; 2221 } 2222 2223 if (!amdgpu_device_has_dc_support(adev)) 2224 return 0; 2225 2226 #if defined(CONFIG_DRM_AMD_DC) 2227 if (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2228 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2229 case IP_VERSION(1, 0, 0): 2230 case IP_VERSION(1, 0, 1): 2231 case IP_VERSION(2, 0, 2): 2232 case IP_VERSION(2, 0, 0): 2233 case IP_VERSION(2, 0, 3): 2234 case IP_VERSION(2, 1, 0): 2235 case IP_VERSION(3, 0, 0): 2236 case IP_VERSION(3, 0, 2): 2237 case IP_VERSION(3, 0, 3): 2238 case IP_VERSION(3, 0, 1): 2239 case IP_VERSION(3, 1, 2): 2240 case IP_VERSION(3, 1, 3): 2241 case IP_VERSION(3, 1, 4): 2242 case IP_VERSION(3, 1, 5): 2243 case IP_VERSION(3, 1, 6): 2244 case IP_VERSION(3, 2, 0): 2245 case IP_VERSION(3, 2, 1): 2246 case IP_VERSION(3, 5, 0): 2247 case IP_VERSION(3, 5, 1): 2248 case IP_VERSION(3, 6, 0): 2249 case IP_VERSION(4, 1, 0): 2250 /* TODO: Fix IP version. DC code expects version 4.0.1 */ 2251 if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0)) 2252 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1); 2253 2254 if (amdgpu_sriov_vf(adev)) 2255 amdgpu_discovery_set_sriov_display(adev); 2256 else 2257 amdgpu_device_ip_block_add(adev, &dm_ip_block); 2258 break; 2259 default: 2260 dev_err(adev->dev, 2261 "Failed to add dm ip block(DCE_HWIP:0x%x)\n", 2262 amdgpu_ip_version(adev, DCE_HWIP, 0)); 2263 return -EINVAL; 2264 } 2265 } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) { 2266 switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) { 2267 case IP_VERSION(12, 0, 0): 2268 case IP_VERSION(12, 0, 1): 2269 case IP_VERSION(12, 1, 0): 2270 if (amdgpu_sriov_vf(adev)) 2271 amdgpu_discovery_set_sriov_display(adev); 2272 else 2273 amdgpu_device_ip_block_add(adev, &dm_ip_block); 2274 break; 2275 default: 2276 dev_err(adev->dev, 2277 "Failed to add dm ip block(DCI_HWIP:0x%x)\n", 2278 amdgpu_ip_version(adev, DCI_HWIP, 0)); 2279 return -EINVAL; 2280 } 2281 } 2282 #endif 2283 return 0; 2284 } 2285 2286 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) 2287 { 2288 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2289 case IP_VERSION(9, 0, 1): 2290 case IP_VERSION(9, 1, 0): 2291 case IP_VERSION(9, 2, 1): 2292 case IP_VERSION(9, 2, 2): 2293 case IP_VERSION(9, 3, 0): 2294 case IP_VERSION(9, 4, 0): 2295 case IP_VERSION(9, 4, 1): 2296 case IP_VERSION(9, 4, 2): 2297 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); 2298 break; 2299 case IP_VERSION(9, 4, 3): 2300 case IP_VERSION(9, 4, 4): 2301 case IP_VERSION(9, 5, 0): 2302 amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block); 2303 break; 2304 case IP_VERSION(10, 1, 10): 2305 case IP_VERSION(10, 1, 2): 2306 case IP_VERSION(10, 1, 1): 2307 case IP_VERSION(10, 1, 3): 2308 case IP_VERSION(10, 1, 4): 2309 case IP_VERSION(10, 3, 0): 2310 case IP_VERSION(10, 3, 2): 2311 case IP_VERSION(10, 3, 1): 2312 case IP_VERSION(10, 3, 4): 2313 case IP_VERSION(10, 3, 5): 2314 case IP_VERSION(10, 3, 6): 2315 case IP_VERSION(10, 3, 3): 2316 case IP_VERSION(10, 3, 7): 2317 amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); 2318 break; 2319 case IP_VERSION(11, 0, 0): 2320 case IP_VERSION(11, 0, 1): 2321 case IP_VERSION(11, 0, 2): 2322 case IP_VERSION(11, 0, 3): 2323 case IP_VERSION(11, 0, 4): 
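	/* GC 11.5.x (APU variants, flagged AMD_IS_APU later in
	 * amdgpu_discovery_set_ip_blocks()) also reuse the gfx v11 block.
	 */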
2324 case IP_VERSION(11, 5, 0): 2325 case IP_VERSION(11, 5, 1): 2326 case IP_VERSION(11, 5, 2): 2327 case IP_VERSION(11, 5, 3): 2328 amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block); 2329 break; 2330 case IP_VERSION(12, 0, 0): 2331 case IP_VERSION(12, 0, 1): 2332 amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block); 2333 break; 2334 default: 2335 dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n", 2336 amdgpu_ip_version(adev, GC_HWIP, 0)); 2337 return -EINVAL; 2338 } 2339 return 0; 2340 } 2341 2342 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) 2343 { 2344 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 2345 case IP_VERSION(4, 0, 0): 2346 case IP_VERSION(4, 0, 1): 2347 case IP_VERSION(4, 1, 0): 2348 case IP_VERSION(4, 1, 1): 2349 case IP_VERSION(4, 1, 2): 2350 case IP_VERSION(4, 2, 0): 2351 case IP_VERSION(4, 2, 2): 2352 case IP_VERSION(4, 4, 0): 2353 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); 2354 break; 2355 case IP_VERSION(4, 4, 2): 2356 case IP_VERSION(4, 4, 5): 2357 case IP_VERSION(4, 4, 4): 2358 amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block); 2359 break; 2360 case IP_VERSION(5, 0, 0): 2361 case IP_VERSION(5, 0, 1): 2362 case IP_VERSION(5, 0, 2): 2363 case IP_VERSION(5, 0, 5): 2364 amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); 2365 break; 2366 case IP_VERSION(5, 2, 0): 2367 case IP_VERSION(5, 2, 2): 2368 case IP_VERSION(5, 2, 4): 2369 case IP_VERSION(5, 2, 5): 2370 case IP_VERSION(5, 2, 6): 2371 case IP_VERSION(5, 2, 3): 2372 case IP_VERSION(5, 2, 1): 2373 case IP_VERSION(5, 2, 7): 2374 amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); 2375 break; 2376 case IP_VERSION(6, 0, 0): 2377 case IP_VERSION(6, 0, 1): 2378 case IP_VERSION(6, 0, 2): 2379 case IP_VERSION(6, 0, 3): 2380 case IP_VERSION(6, 1, 0): 2381 case IP_VERSION(6, 1, 1): 2382 case IP_VERSION(6, 1, 2): 2383 case IP_VERSION(6, 1, 3): 2384 amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block); 2385 break; 2386 case IP_VERSION(7, 0, 0): 2387 case IP_VERSION(7, 0, 1): 2388 amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block); 2389 break; 2390 default: 2391 dev_err(adev->dev, 2392 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", 2393 amdgpu_ip_version(adev, SDMA0_HWIP, 0)); 2394 return -EINVAL; 2395 } 2396 return 0; 2397 } 2398 2399 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) 2400 { 2401 if (amdgpu_ip_version(adev, VCE_HWIP, 0)) { 2402 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 2403 case IP_VERSION(7, 0, 0): 2404 case IP_VERSION(7, 2, 0): 2405 /* UVD is not supported on vega20 SR-IOV */ 2406 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 2407 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); 2408 break; 2409 default: 2410 dev_err(adev->dev, 2411 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", 2412 amdgpu_ip_version(adev, UVD_HWIP, 0)); 2413 return -EINVAL; 2414 } 2415 switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) { 2416 case IP_VERSION(4, 0, 0): 2417 case IP_VERSION(4, 1, 0): 2418 /* VCE is not supported on vega20 SR-IOV */ 2419 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 2420 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); 2421 break; 2422 default: 2423 dev_err(adev->dev, 2424 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", 2425 amdgpu_ip_version(adev, VCE_HWIP, 0)); 2426 return -EINVAL; 2427 } 2428 } else { 2429 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 2430 case IP_VERSION(1, 0, 0): 2431 case IP_VERSION(1, 0, 1): 2432 amdgpu_device_ip_block_add(adev, 
&vcn_v1_0_ip_block); 2433 break; 2434 case IP_VERSION(2, 0, 0): 2435 case IP_VERSION(2, 0, 2): 2436 case IP_VERSION(2, 2, 0): 2437 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); 2438 if (!amdgpu_sriov_vf(adev)) 2439 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); 2440 break; 2441 case IP_VERSION(2, 0, 3): 2442 break; 2443 case IP_VERSION(2, 5, 0): 2444 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); 2445 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); 2446 break; 2447 case IP_VERSION(2, 6, 0): 2448 amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block); 2449 amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block); 2450 break; 2451 case IP_VERSION(3, 0, 0): 2452 case IP_VERSION(3, 0, 16): 2453 case IP_VERSION(3, 1, 1): 2454 case IP_VERSION(3, 1, 2): 2455 case IP_VERSION(3, 0, 2): 2456 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 2457 if (!amdgpu_sriov_vf(adev)) 2458 amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); 2459 break; 2460 case IP_VERSION(3, 0, 33): 2461 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 2462 break; 2463 case IP_VERSION(4, 0, 0): 2464 case IP_VERSION(4, 0, 2): 2465 case IP_VERSION(4, 0, 4): 2466 amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block); 2467 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block); 2468 break; 2469 case IP_VERSION(4, 0, 3): 2470 amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block); 2471 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block); 2472 break; 2473 case IP_VERSION(4, 0, 5): 2474 case IP_VERSION(4, 0, 6): 2475 amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block); 2476 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block); 2477 break; 2478 case IP_VERSION(5, 0, 0): 2479 amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block); 2480 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block); 2481 break; 2482 case IP_VERSION(5, 0, 1): 2483 amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block); 2484 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block); 2485 break; 2486 default: 2487 dev_err(adev->dev, 2488 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", 2489 amdgpu_ip_version(adev, UVD_HWIP, 0)); 2490 return -EINVAL; 2491 } 2492 } 2493 return 0; 2494 } 2495 2496 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) 2497 { 2498 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2499 case IP_VERSION(11, 0, 0): 2500 case IP_VERSION(11, 0, 1): 2501 case IP_VERSION(11, 0, 2): 2502 case IP_VERSION(11, 0, 3): 2503 case IP_VERSION(11, 0, 4): 2504 case IP_VERSION(11, 5, 0): 2505 case IP_VERSION(11, 5, 1): 2506 case IP_VERSION(11, 5, 2): 2507 case IP_VERSION(11, 5, 3): 2508 amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block); 2509 adev->enable_mes = true; 2510 adev->enable_mes_kiq = true; 2511 break; 2512 case IP_VERSION(12, 0, 0): 2513 case IP_VERSION(12, 0, 1): 2514 amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block); 2515 adev->enable_mes = true; 2516 adev->enable_mes_kiq = true; 2517 if (amdgpu_uni_mes) 2518 adev->enable_uni_mes = true; 2519 break; 2520 default: 2521 break; 2522 } 2523 return 0; 2524 } 2525 2526 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev) 2527 { 2528 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2529 case IP_VERSION(9, 4, 3): 2530 case IP_VERSION(9, 4, 4): 2531 case IP_VERSION(9, 5, 0): 2532 aqua_vanjaram_init_soc_config(adev); 2533 break; 2534 default: 2535 break; 2536 } 2537 } 2538 2539 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev) 2540 { 2541 switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { 2542 
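	/* Only VPE 6.1.x parts get a VPE IP block; any other version
	 * (including a missing VPE IP) falls through to the default case and
	 * is skipped without error.
	 */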
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
	case IP_VERSION(6, 1, 3):
		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
		break;
	default:
		break;
	}

	return 0;
}

static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(4, 0, 5):
	case IP_VERSION(4, 0, 6):
		if (amdgpu_umsch_mm & 0x1) {
			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
			adev->enable_umsch_mm = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
{
#if defined(CONFIG_DRM_AMD_ISP)
	switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
	case IP_VERSION(4, 1, 0):
		amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
		break;
	case IP_VERSION(4, 1, 1):
		amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
		break;
	default:
		break;
	}
#endif

	return 0;
}

int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->sdma.sdma_mask = 3;
		adev->gmc.num_umc = 4;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->sdma.sdma_mask = 3;
		adev->gmc.num_umc = 4;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->sdma.sdma_mask = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		adev->gfx.xcc_mask = 1;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
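			/* ISP 2.0 is recorded here, but it has no matching case
			 * in amdgpu_discovery_set_isp_ip_blocks(), which only
			 * adds blocks for ISP 4.1.x, so no ISP IP block is
			 * registered for Raven.
			 */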
adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0); 2698 } 2699 break; 2700 case CHIP_VEGA20: 2701 /* This is not fatal. We only need the discovery 2702 * binary for sysfs. We don't need it for a 2703 * functional system. 2704 */ 2705 amdgpu_discovery_init(adev); 2706 vega20_reg_base_init(adev); 2707 adev->sdma.num_instances = 2; 2708 adev->sdma.sdma_mask = 3; 2709 adev->gmc.num_umc = 8; 2710 adev->gfx.xcc_mask = 1; 2711 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0); 2712 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0); 2713 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); 2714 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0); 2715 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0); 2716 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0); 2717 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0); 2718 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0); 2719 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1); 2720 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2); 2721 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2722 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2); 2723 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2); 2724 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0); 2725 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0); 2726 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0); 2727 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0); 2728 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0); 2729 break; 2730 case CHIP_ARCTURUS: 2731 /* This is not fatal. We only need the discovery 2732 * binary for sysfs. We don't need it for a 2733 * functional system. 2734 */ 2735 amdgpu_discovery_init(adev); 2736 arct_reg_base_init(adev); 2737 adev->sdma.num_instances = 8; 2738 adev->sdma.sdma_mask = 0xff; 2739 adev->vcn.num_vcn_inst = 2; 2740 adev->gmc.num_umc = 8; 2741 adev->gfx.xcc_mask = 1; 2742 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1); 2743 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1); 2744 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); 2745 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1); 2746 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2); 2747 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2); 2748 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2); 2749 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2); 2750 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2); 2751 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2); 2752 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2); 2753 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2); 2754 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1); 2755 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1); 2756 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2); 2757 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4); 2758 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2759 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3); 2760 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3); 2761 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1); 2762 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0); 2763 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0); 2764 break; 2765 case CHIP_ALDEBARAN: 2766 /* This is not fatal. We only need the discovery 2767 * binary for sysfs. We don't need it for a 2768 * functional system. 
2769 */ 2770 amdgpu_discovery_init(adev); 2771 aldebaran_reg_base_init(adev); 2772 adev->sdma.num_instances = 5; 2773 adev->sdma.sdma_mask = 0x1f; 2774 adev->vcn.num_vcn_inst = 2; 2775 adev->gmc.num_umc = 4; 2776 adev->gfx.xcc_mask = 1; 2777 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2778 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2779 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); 2780 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0); 2781 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0); 2782 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0); 2783 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0); 2784 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0); 2785 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0); 2786 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2); 2787 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4); 2788 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0); 2789 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2); 2790 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2); 2791 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2); 2792 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2); 2793 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2); 2794 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0); 2795 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0); 2796 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); 2797 break; 2798 case CHIP_CYAN_SKILLFISH: 2799 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { 2800 r = amdgpu_discovery_reg_base_init(adev); 2801 if (r) 2802 return -EINVAL; 2803 2804 amdgpu_discovery_harvest_ip(adev); 2805 amdgpu_discovery_get_gfx_info(adev); 2806 amdgpu_discovery_get_mall_info(adev); 2807 amdgpu_discovery_get_vcn_info(adev); 2808 } else { 2809 cyan_skillfish_reg_base_init(adev); 2810 adev->sdma.num_instances = 2; 2811 adev->sdma.sdma_mask = 3; 2812 adev->gfx.xcc_mask = 1; 2813 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3); 2814 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3); 2815 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1); 2816 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1); 2817 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1); 2818 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1); 2819 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0); 2820 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1); 2821 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1); 2822 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8); 2823 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8); 2824 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1); 2825 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8); 2826 adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3); 2827 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3); 2828 } 2829 break; 2830 default: 2831 r = amdgpu_discovery_reg_base_init(adev); 2832 if (r) { 2833 drm_err(&adev->ddev, "discovery failed: %d\n", r); 2834 return r; 2835 } 2836 2837 amdgpu_discovery_harvest_ip(adev); 2838 amdgpu_discovery_get_gfx_info(adev); 2839 amdgpu_discovery_get_mall_info(adev); 2840 amdgpu_discovery_get_vcn_info(adev); 2841 break; 2842 } 2843 2844 amdgpu_discovery_init_soc_config(adev); 2845 amdgpu_discovery_sysfs_init(adev); 2846 2847 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2848 case IP_VERSION(9, 0, 1): 2849 case IP_VERSION(9, 2, 1): 2850 case IP_VERSION(9, 4, 0): 2851 case IP_VERSION(9, 4, 1): 2852 case IP_VERSION(9, 4, 2): 2853 case IP_VERSION(9, 4, 3): 2854 case 
IP_VERSION(9, 4, 4): 2855 case IP_VERSION(9, 5, 0): 2856 adev->family = AMDGPU_FAMILY_AI; 2857 break; 2858 case IP_VERSION(9, 1, 0): 2859 case IP_VERSION(9, 2, 2): 2860 case IP_VERSION(9, 3, 0): 2861 adev->family = AMDGPU_FAMILY_RV; 2862 break; 2863 case IP_VERSION(10, 1, 10): 2864 case IP_VERSION(10, 1, 1): 2865 case IP_VERSION(10, 1, 2): 2866 case IP_VERSION(10, 1, 3): 2867 case IP_VERSION(10, 1, 4): 2868 case IP_VERSION(10, 3, 0): 2869 case IP_VERSION(10, 3, 2): 2870 case IP_VERSION(10, 3, 4): 2871 case IP_VERSION(10, 3, 5): 2872 adev->family = AMDGPU_FAMILY_NV; 2873 break; 2874 case IP_VERSION(10, 3, 1): 2875 adev->family = AMDGPU_FAMILY_VGH; 2876 adev->apu_flags |= AMD_APU_IS_VANGOGH; 2877 break; 2878 case IP_VERSION(10, 3, 3): 2879 adev->family = AMDGPU_FAMILY_YC; 2880 break; 2881 case IP_VERSION(10, 3, 6): 2882 adev->family = AMDGPU_FAMILY_GC_10_3_6; 2883 break; 2884 case IP_VERSION(10, 3, 7): 2885 adev->family = AMDGPU_FAMILY_GC_10_3_7; 2886 break; 2887 case IP_VERSION(11, 0, 0): 2888 case IP_VERSION(11, 0, 2): 2889 case IP_VERSION(11, 0, 3): 2890 adev->family = AMDGPU_FAMILY_GC_11_0_0; 2891 break; 2892 case IP_VERSION(11, 0, 1): 2893 case IP_VERSION(11, 0, 4): 2894 adev->family = AMDGPU_FAMILY_GC_11_0_1; 2895 break; 2896 case IP_VERSION(11, 5, 0): 2897 case IP_VERSION(11, 5, 1): 2898 case IP_VERSION(11, 5, 2): 2899 case IP_VERSION(11, 5, 3): 2900 adev->family = AMDGPU_FAMILY_GC_11_5_0; 2901 break; 2902 case IP_VERSION(12, 0, 0): 2903 case IP_VERSION(12, 0, 1): 2904 adev->family = AMDGPU_FAMILY_GC_12_0_0; 2905 break; 2906 default: 2907 return -EINVAL; 2908 } 2909 2910 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2911 case IP_VERSION(9, 1, 0): 2912 case IP_VERSION(9, 2, 2): 2913 case IP_VERSION(9, 3, 0): 2914 case IP_VERSION(10, 1, 3): 2915 case IP_VERSION(10, 1, 4): 2916 case IP_VERSION(10, 3, 1): 2917 case IP_VERSION(10, 3, 3): 2918 case IP_VERSION(10, 3, 6): 2919 case IP_VERSION(10, 3, 7): 2920 case IP_VERSION(11, 0, 1): 2921 case IP_VERSION(11, 0, 4): 2922 case IP_VERSION(11, 5, 0): 2923 case IP_VERSION(11, 5, 1): 2924 case IP_VERSION(11, 5, 2): 2925 case IP_VERSION(11, 5, 3): 2926 adev->flags |= AMD_IS_APU; 2927 break; 2928 default: 2929 break; 2930 } 2931 2932 /* set NBIO version */ 2933 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 2934 case IP_VERSION(6, 1, 0): 2935 case IP_VERSION(6, 2, 0): 2936 adev->nbio.funcs = &nbio_v6_1_funcs; 2937 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; 2938 break; 2939 case IP_VERSION(7, 0, 0): 2940 case IP_VERSION(7, 0, 1): 2941 case IP_VERSION(2, 5, 0): 2942 adev->nbio.funcs = &nbio_v7_0_funcs; 2943 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; 2944 break; 2945 case IP_VERSION(7, 4, 0): 2946 case IP_VERSION(7, 4, 1): 2947 case IP_VERSION(7, 4, 4): 2948 adev->nbio.funcs = &nbio_v7_4_funcs; 2949 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; 2950 break; 2951 case IP_VERSION(7, 9, 0): 2952 case IP_VERSION(7, 9, 1): 2953 adev->nbio.funcs = &nbio_v7_9_funcs; 2954 adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg; 2955 break; 2956 case IP_VERSION(7, 11, 0): 2957 case IP_VERSION(7, 11, 1): 2958 case IP_VERSION(7, 11, 2): 2959 case IP_VERSION(7, 11, 3): 2960 adev->nbio.funcs = &nbio_v7_11_funcs; 2961 adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg; 2962 break; 2963 case IP_VERSION(7, 2, 0): 2964 case IP_VERSION(7, 2, 1): 2965 case IP_VERSION(7, 3, 0): 2966 case IP_VERSION(7, 5, 0): 2967 case IP_VERSION(7, 5, 1): 2968 adev->nbio.funcs = &nbio_v7_2_funcs; 2969 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg; 2970 
break; 2971 case IP_VERSION(2, 1, 1): 2972 case IP_VERSION(2, 3, 0): 2973 case IP_VERSION(2, 3, 1): 2974 case IP_VERSION(2, 3, 2): 2975 case IP_VERSION(3, 3, 0): 2976 case IP_VERSION(3, 3, 1): 2977 case IP_VERSION(3, 3, 2): 2978 case IP_VERSION(3, 3, 3): 2979 adev->nbio.funcs = &nbio_v2_3_funcs; 2980 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; 2981 break; 2982 case IP_VERSION(4, 3, 0): 2983 case IP_VERSION(4, 3, 1): 2984 if (amdgpu_sriov_vf(adev)) 2985 adev->nbio.funcs = &nbio_v4_3_sriov_funcs; 2986 else 2987 adev->nbio.funcs = &nbio_v4_3_funcs; 2988 adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg; 2989 break; 2990 case IP_VERSION(7, 7, 0): 2991 case IP_VERSION(7, 7, 1): 2992 adev->nbio.funcs = &nbio_v7_7_funcs; 2993 adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg; 2994 break; 2995 case IP_VERSION(6, 3, 1): 2996 adev->nbio.funcs = &nbif_v6_3_1_funcs; 2997 adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg; 2998 break; 2999 default: 3000 break; 3001 } 3002 3003 switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) { 3004 case IP_VERSION(4, 0, 0): 3005 case IP_VERSION(4, 0, 1): 3006 case IP_VERSION(4, 1, 0): 3007 case IP_VERSION(4, 1, 1): 3008 case IP_VERSION(4, 1, 2): 3009 case IP_VERSION(4, 2, 0): 3010 case IP_VERSION(4, 2, 1): 3011 case IP_VERSION(4, 4, 0): 3012 case IP_VERSION(4, 4, 2): 3013 case IP_VERSION(4, 4, 5): 3014 adev->hdp.funcs = &hdp_v4_0_funcs; 3015 break; 3016 case IP_VERSION(5, 0, 0): 3017 case IP_VERSION(5, 0, 1): 3018 case IP_VERSION(5, 0, 2): 3019 case IP_VERSION(5, 0, 3): 3020 case IP_VERSION(5, 0, 4): 3021 case IP_VERSION(5, 2, 0): 3022 adev->hdp.funcs = &hdp_v5_0_funcs; 3023 break; 3024 case IP_VERSION(5, 2, 1): 3025 adev->hdp.funcs = &hdp_v5_2_funcs; 3026 break; 3027 case IP_VERSION(6, 0, 0): 3028 case IP_VERSION(6, 0, 1): 3029 case IP_VERSION(6, 1, 0): 3030 adev->hdp.funcs = &hdp_v6_0_funcs; 3031 break; 3032 case IP_VERSION(7, 0, 0): 3033 adev->hdp.funcs = &hdp_v7_0_funcs; 3034 break; 3035 default: 3036 break; 3037 } 3038 3039 switch (amdgpu_ip_version(adev, DF_HWIP, 0)) { 3040 case IP_VERSION(3, 6, 0): 3041 case IP_VERSION(3, 6, 1): 3042 case IP_VERSION(3, 6, 2): 3043 adev->df.funcs = &df_v3_6_funcs; 3044 break; 3045 case IP_VERSION(2, 1, 0): 3046 case IP_VERSION(2, 1, 1): 3047 case IP_VERSION(2, 5, 0): 3048 case IP_VERSION(3, 5, 1): 3049 case IP_VERSION(3, 5, 2): 3050 adev->df.funcs = &df_v1_7_funcs; 3051 break; 3052 case IP_VERSION(4, 3, 0): 3053 adev->df.funcs = &df_v4_3_funcs; 3054 break; 3055 case IP_VERSION(4, 6, 2): 3056 adev->df.funcs = &df_v4_6_2_funcs; 3057 break; 3058 case IP_VERSION(4, 15, 0): 3059 case IP_VERSION(4, 15, 1): 3060 adev->df.funcs = &df_v4_15_funcs; 3061 break; 3062 default: 3063 break; 3064 } 3065 3066 switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) { 3067 case IP_VERSION(9, 0, 0): 3068 case IP_VERSION(9, 0, 1): 3069 case IP_VERSION(10, 0, 0): 3070 case IP_VERSION(10, 0, 1): 3071 case IP_VERSION(10, 0, 2): 3072 adev->smuio.funcs = &smuio_v9_0_funcs; 3073 break; 3074 case IP_VERSION(11, 0, 0): 3075 case IP_VERSION(11, 0, 2): 3076 case IP_VERSION(11, 0, 3): 3077 case IP_VERSION(11, 0, 4): 3078 case IP_VERSION(11, 0, 7): 3079 case IP_VERSION(11, 0, 8): 3080 adev->smuio.funcs = &smuio_v11_0_funcs; 3081 break; 3082 case IP_VERSION(11, 0, 6): 3083 case IP_VERSION(11, 0, 10): 3084 case IP_VERSION(11, 0, 11): 3085 case IP_VERSION(11, 5, 0): 3086 case IP_VERSION(11, 5, 2): 3087 case IP_VERSION(13, 0, 1): 3088 case IP_VERSION(13, 0, 9): 3089 case IP_VERSION(13, 0, 10): 3090 adev->smuio.funcs = &smuio_v11_0_6_funcs; 3091 break; 3092 case 
IP_VERSION(13, 0, 2): 3093 adev->smuio.funcs = &smuio_v13_0_funcs; 3094 break; 3095 case IP_VERSION(13, 0, 3): 3096 case IP_VERSION(13, 0, 11): 3097 adev->smuio.funcs = &smuio_v13_0_3_funcs; 3098 if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) { 3099 adev->flags |= AMD_IS_APU; 3100 } 3101 break; 3102 case IP_VERSION(13, 0, 6): 3103 case IP_VERSION(13, 0, 8): 3104 case IP_VERSION(14, 0, 0): 3105 case IP_VERSION(14, 0, 1): 3106 adev->smuio.funcs = &smuio_v13_0_6_funcs; 3107 break; 3108 case IP_VERSION(14, 0, 2): 3109 adev->smuio.funcs = &smuio_v14_0_2_funcs; 3110 break; 3111 default: 3112 break; 3113 } 3114 3115 switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) { 3116 case IP_VERSION(6, 0, 0): 3117 case IP_VERSION(6, 0, 1): 3118 case IP_VERSION(6, 0, 2): 3119 case IP_VERSION(6, 0, 3): 3120 adev->lsdma.funcs = &lsdma_v6_0_funcs; 3121 break; 3122 case IP_VERSION(7, 0, 0): 3123 case IP_VERSION(7, 0, 1): 3124 adev->lsdma.funcs = &lsdma_v7_0_funcs; 3125 break; 3126 default: 3127 break; 3128 } 3129 3130 r = amdgpu_discovery_set_common_ip_blocks(adev); 3131 if (r) 3132 return r; 3133 3134 r = amdgpu_discovery_set_gmc_ip_blocks(adev); 3135 if (r) 3136 return r; 3137 3138 /* For SR-IOV, PSP needs to be initialized before IH */ 3139 if (amdgpu_sriov_vf(adev)) { 3140 r = amdgpu_discovery_set_psp_ip_blocks(adev); 3141 if (r) 3142 return r; 3143 r = amdgpu_discovery_set_ih_ip_blocks(adev); 3144 if (r) 3145 return r; 3146 } else { 3147 r = amdgpu_discovery_set_ih_ip_blocks(adev); 3148 if (r) 3149 return r; 3150 3151 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 3152 r = amdgpu_discovery_set_psp_ip_blocks(adev); 3153 if (r) 3154 return r; 3155 } 3156 } 3157 3158 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 3159 r = amdgpu_discovery_set_smu_ip_blocks(adev); 3160 if (r) 3161 return r; 3162 } 3163 3164 r = amdgpu_discovery_set_display_ip_blocks(adev); 3165 if (r) 3166 return r; 3167 3168 r = amdgpu_discovery_set_gc_ip_blocks(adev); 3169 if (r) 3170 return r; 3171 3172 r = amdgpu_discovery_set_sdma_ip_blocks(adev); 3173 if (r) 3174 return r; 3175 3176 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && 3177 !amdgpu_sriov_vf(adev)) || 3178 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { 3179 r = amdgpu_discovery_set_smu_ip_blocks(adev); 3180 if (r) 3181 return r; 3182 } 3183 3184 r = amdgpu_discovery_set_mm_ip_blocks(adev); 3185 if (r) 3186 return r; 3187 3188 r = amdgpu_discovery_set_mes_ip_blocks(adev); 3189 if (r) 3190 return r; 3191 3192 r = amdgpu_discovery_set_vpe_ip_blocks(adev); 3193 if (r) 3194 return r; 3195 3196 r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev); 3197 if (r) 3198 return r; 3199 3200 r = amdgpu_discovery_set_isp_ip_blocks(adev); 3201 if (r) 3202 return r; 3203 return 0; 3204 } 3205 3206
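
/*
 * Illustrative call sequence (a sketch, not code from this file): a probe-time
 * caller is expected to invoke amdgpu_discovery_set_ip_blocks() once per
 * device and abort on failure, e.g.:
 *
 *	r = amdgpu_discovery_set_ip_blocks(adev);
 *	if (r)
 *		return r;
 *
 * On success the function has populated adev->family, the adev->ip_versions[]
 * table (either from the IP discovery binary or from the hard-coded legacy
 * tables above), the nbio/hdp/df/smuio/lsdma callback pointers, and the list
 * of IP blocks registered via amdgpu_device_ip_block_add().
 */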