1 /* 2 * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 26 #include "amdgpu.h" 27 #include "amdgpu_discovery.h" 28 #include "soc15_hw_ip.h" 29 #include "discovery.h" 30 #include "amdgpu_ras.h" 31 32 #include "soc15.h" 33 #include "gfx_v9_0.h" 34 #include "gfx_v9_4_3.h" 35 #include "gmc_v9_0.h" 36 #include "df_v1_7.h" 37 #include "df_v3_6.h" 38 #include "df_v4_3.h" 39 #include "df_v4_6_2.h" 40 #include "df_v4_15.h" 41 #include "nbio_v6_1.h" 42 #include "nbio_v7_0.h" 43 #include "nbio_v7_4.h" 44 #include "nbio_v7_9.h" 45 #include "nbio_v7_11.h" 46 #include "hdp_v4_0.h" 47 #include "vega10_ih.h" 48 #include "vega20_ih.h" 49 #include "sdma_v4_0.h" 50 #include "sdma_v4_4_2.h" 51 #include "uvd_v7_0.h" 52 #include "vce_v4_0.h" 53 #include "vcn_v1_0.h" 54 #include "vcn_v2_5.h" 55 #include "jpeg_v2_5.h" 56 #include "smuio_v9_0.h" 57 #include "gmc_v10_0.h" 58 #include "gmc_v11_0.h" 59 #include "gmc_v12_0.h" 60 #include "gfxhub_v2_0.h" 61 #include "mmhub_v2_0.h" 62 #include "nbio_v2_3.h" 63 #include "nbio_v4_3.h" 64 #include "nbio_v7_2.h" 65 #include "nbio_v7_7.h" 66 #include "nbif_v6_3_1.h" 67 #include "hdp_v5_0.h" 68 #include "hdp_v5_2.h" 69 #include "hdp_v6_0.h" 70 #include "hdp_v7_0.h" 71 #include "nv.h" 72 #include "soc21.h" 73 #include "soc24.h" 74 #include "navi10_ih.h" 75 #include "ih_v6_0.h" 76 #include "ih_v6_1.h" 77 #include "ih_v7_0.h" 78 #include "gfx_v10_0.h" 79 #include "gfx_v11_0.h" 80 #include "gfx_v12_0.h" 81 #include "sdma_v5_0.h" 82 #include "sdma_v5_2.h" 83 #include "sdma_v6_0.h" 84 #include "sdma_v7_0.h" 85 #include "lsdma_v6_0.h" 86 #include "lsdma_v7_0.h" 87 #include "vcn_v2_0.h" 88 #include "jpeg_v2_0.h" 89 #include "vcn_v3_0.h" 90 #include "jpeg_v3_0.h" 91 #include "vcn_v4_0.h" 92 #include "jpeg_v4_0.h" 93 #include "vcn_v4_0_3.h" 94 #include "jpeg_v4_0_3.h" 95 #include "vcn_v4_0_5.h" 96 #include "jpeg_v4_0_5.h" 97 #include "amdgpu_vkms.h" 98 #include "mes_v11_0.h" 99 #include "mes_v12_0.h" 100 #include "smuio_v11_0.h" 101 #include "smuio_v11_0_6.h" 102 #include "smuio_v13_0.h" 103 #include "smuio_v13_0_3.h" 104 #include "smuio_v13_0_6.h" 105 #include "smuio_v14_0_2.h" 106 #include "vcn_v5_0_0.h" 107 #include "vcn_v5_0_1.h" 108 #include "jpeg_v5_0_0.h" 109 #include "jpeg_v5_0_1.h" 110 #include "amdgpu_ras_mgr.h" 111 112 #include "amdgpu_vpe.h" 113 #if defined(CONFIG_DRM_AMD_ISP) 114 #include "amdgpu_isp.h" 115 #endif 
116 117 MODULE_FIRMWARE("amdgpu/ip_discovery.bin"); 118 MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin"); 119 MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin"); 120 MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin"); 121 MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin"); 122 MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin"); 123 MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin"); 124 MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin"); 125 MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin"); 126 127 #define mmIP_DISCOVERY_VERSION 0x16A00 128 #define mmRCC_CONFIG_MEMSIZE 0xde3 129 #define mmMP0_SMN_C2PMSG_33 0x16061 130 #define mmMM_INDEX 0x0 131 #define mmMM_INDEX_HI 0x6 132 #define mmMM_DATA 0x1 133 134 static const char *hw_id_names[HW_ID_MAX] = { 135 [MP1_HWID] = "MP1", 136 [MP2_HWID] = "MP2", 137 [THM_HWID] = "THM", 138 [SMUIO_HWID] = "SMUIO", 139 [FUSE_HWID] = "FUSE", 140 [CLKA_HWID] = "CLKA", 141 [PWR_HWID] = "PWR", 142 [GC_HWID] = "GC", 143 [UVD_HWID] = "UVD", 144 [AUDIO_AZ_HWID] = "AUDIO_AZ", 145 [ACP_HWID] = "ACP", 146 [DCI_HWID] = "DCI", 147 [DMU_HWID] = "DMU", 148 [DCO_HWID] = "DCO", 149 [DIO_HWID] = "DIO", 150 [XDMA_HWID] = "XDMA", 151 [DCEAZ_HWID] = "DCEAZ", 152 [DAZ_HWID] = "DAZ", 153 [SDPMUX_HWID] = "SDPMUX", 154 [NTB_HWID] = "NTB", 155 [IOHC_HWID] = "IOHC", 156 [L2IMU_HWID] = "L2IMU", 157 [VCE_HWID] = "VCE", 158 [MMHUB_HWID] = "MMHUB", 159 [ATHUB_HWID] = "ATHUB", 160 [DBGU_NBIO_HWID] = "DBGU_NBIO", 161 [DFX_HWID] = "DFX", 162 [DBGU0_HWID] = "DBGU0", 163 [DBGU1_HWID] = "DBGU1", 164 [OSSSYS_HWID] = "OSSSYS", 165 [HDP_HWID] = "HDP", 166 [SDMA0_HWID] = "SDMA0", 167 [SDMA1_HWID] = "SDMA1", 168 [SDMA2_HWID] = "SDMA2", 169 [SDMA3_HWID] = "SDMA3", 170 [LSDMA_HWID] = "LSDMA", 171 [ISP_HWID] = "ISP", 172 [DBGU_IO_HWID] = "DBGU_IO", 173 [DF_HWID] = "DF", 174 [CLKB_HWID] = "CLKB", 175 [FCH_HWID] = "FCH", 176 [DFX_DAP_HWID] = "DFX_DAP", 177 [L1IMU_PCIE_HWID] = "L1IMU_PCIE", 178 [L1IMU_NBIF_HWID] = "L1IMU_NBIF", 179 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR", 180 [L1IMU3_HWID] = "L1IMU3", 181 [L1IMU4_HWID] = "L1IMU4", 182 [L1IMU5_HWID] = "L1IMU5", 183 [L1IMU6_HWID] = "L1IMU6", 184 [L1IMU7_HWID] = "L1IMU7", 185 [L1IMU8_HWID] = "L1IMU8", 186 [L1IMU9_HWID] = "L1IMU9", 187 [L1IMU10_HWID] = "L1IMU10", 188 [L1IMU11_HWID] = "L1IMU11", 189 [L1IMU12_HWID] = "L1IMU12", 190 [L1IMU13_HWID] = "L1IMU13", 191 [L1IMU14_HWID] = "L1IMU14", 192 [L1IMU15_HWID] = "L1IMU15", 193 [WAFLC_HWID] = "WAFLC", 194 [FCH_USB_PD_HWID] = "FCH_USB_PD", 195 [PCIE_HWID] = "PCIE", 196 [PCS_HWID] = "PCS", 197 [DDCL_HWID] = "DDCL", 198 [SST_HWID] = "SST", 199 [IOAGR_HWID] = "IOAGR", 200 [NBIF_HWID] = "NBIF", 201 [IOAPIC_HWID] = "IOAPIC", 202 [SYSTEMHUB_HWID] = "SYSTEMHUB", 203 [NTBCCP_HWID] = "NTBCCP", 204 [UMC_HWID] = "UMC", 205 [SATA_HWID] = "SATA", 206 [USB_HWID] = "USB", 207 [CCXSEC_HWID] = "CCXSEC", 208 [XGMI_HWID] = "XGMI", 209 [XGBE_HWID] = "XGBE", 210 [MP0_HWID] = "MP0", 211 [VPE_HWID] = "VPE", 212 }; 213 214 static int hw_id_map[MAX_HWIP] = { 215 [GC_HWIP] = GC_HWID, 216 [HDP_HWIP] = HDP_HWID, 217 [SDMA0_HWIP] = SDMA0_HWID, 218 [SDMA1_HWIP] = SDMA1_HWID, 219 [SDMA2_HWIP] = SDMA2_HWID, 220 [SDMA3_HWIP] = SDMA3_HWID, 221 [LSDMA_HWIP] = LSDMA_HWID, 222 [MMHUB_HWIP] = MMHUB_HWID, 223 [ATHUB_HWIP] = ATHUB_HWID, 224 [NBIO_HWIP] = NBIF_HWID, 225 [MP0_HWIP] = MP0_HWID, 226 [MP1_HWIP] = MP1_HWID, 227 [UVD_HWIP] = UVD_HWID, 228 [VCE_HWIP] = VCE_HWID, 229 [DF_HWIP] = DF_HWID, 230 [DCE_HWIP] = DMU_HWID, 231 [OSSSYS_HWIP] = OSSSYS_HWID, 232 [SMUIO_HWIP] = SMUIO_HWID, 233 [PWR_HWIP] = PWR_HWID, 234 [NBIF_HWIP] = NBIF_HWID, 235 [THM_HWIP] = 
THM_HWID, 236 [CLK_HWIP] = CLKA_HWID, 237 [UMC_HWIP] = UMC_HWID, 238 [XGMI_HWIP] = XGMI_HWID, 239 [DCI_HWIP] = DCI_HWID, 240 [PCIE_HWIP] = PCIE_HWID, 241 [VPE_HWIP] = VPE_HWID, 242 [ISP_HWIP] = ISP_HWID, 243 }; 244 245 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary) 246 { 247 u64 tmr_offset, tmr_size, pos; 248 void *discv_regn; 249 int ret; 250 251 ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size); 252 if (ret) 253 return ret; 254 255 pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET; 256 257 /* This region is read-only and reserved from system use */ 258 discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC); 259 if (discv_regn) { 260 memcpy(binary, discv_regn, adev->discovery.size); 261 memunmap(discv_regn); 262 return 0; 263 } 264 265 return -ENOENT; 266 } 267 268 #define IP_DISCOVERY_V2 2 269 #define IP_DISCOVERY_V4 4 270 271 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, 272 uint8_t *binary) 273 { 274 bool sz_valid = true; 275 uint64_t vram_size; 276 int i, ret = 0; 277 u32 msg; 278 279 if (!amdgpu_sriov_vf(adev)) { 280 /* It can take up to two second for IFWI init to complete on some dGPUs, 281 * but generally it should be in the 60-100ms range. Normally this starts 282 * as soon as the device gets power so by the time the OS loads this has long 283 * completed. However, when a card is hotplugged via e.g., USB4, we need to 284 * wait for this to complete. Once the C2PMSG is updated, we can 285 * continue. 286 */ 287 288 for (i = 0; i < 2000; i++) { 289 msg = RREG32(mmMP0_SMN_C2PMSG_33); 290 if (msg & 0x80000000) 291 break; 292 msleep(1); 293 } 294 } 295 296 vram_size = RREG32(mmRCC_CONFIG_MEMSIZE); 297 if (!vram_size || vram_size == U32_MAX) 298 sz_valid = false; 299 else 300 vram_size <<= 20; 301 302 /* 303 * If in VRAM, discovery TMR is marked for reservation. If it is in system mem, 304 * then it is not required to be reserved. 
305 */ 306 if (sz_valid) { 307 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; 308 amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, 309 adev->discovery.size, false); 310 adev->discovery.reserve_tmr = true; 311 } else { 312 ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary); 313 } 314 315 if (ret) 316 dev_err(adev->dev, 317 "failed to read discovery info from memory, vram size read: %llx", 318 vram_size); 319 320 return ret; 321 } 322 323 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, 324 uint8_t *binary, 325 const char *fw_name) 326 { 327 const struct firmware *fw; 328 int r; 329 330 r = firmware_request_nowarn(&fw, fw_name, adev->dev); 331 if (r) { 332 if (amdgpu_discovery == 2) 333 dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name); 334 else 335 drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name); 336 return r; 337 } 338 339 memcpy((u8 *)binary, (u8 *)fw->data, fw->size); 340 release_firmware(fw); 341 342 return 0; 343 } 344 345 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size) 346 { 347 uint16_t checksum = 0; 348 int i; 349 350 for (i = 0; i < size; i++) 351 checksum += data[i]; 352 353 return checksum; 354 } 355 356 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size, 357 uint16_t expected) 358 { 359 return !!(amdgpu_discovery_calculate_checksum(data, size) == expected); 360 } 361 362 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary) 363 { 364 struct binary_header *bhdr; 365 bhdr = (struct binary_header *)binary; 366 367 return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE); 368 } 369 370 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) 371 { 372 /* 373 * So far, apply this quirk only on those Navy Flounder boards which 374 * have a bad harvest table of VCN config. 
375 */ 376 if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) && 377 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) { 378 switch (adev->pdev->revision) { 379 case 0xC1: 380 case 0xC2: 381 case 0xC3: 382 case 0xC5: 383 case 0xC7: 384 case 0xCF: 385 case 0xDF: 386 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 387 adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1; 388 break; 389 default: 390 break; 391 } 392 } 393 } 394 395 static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev, 396 struct binary_header *bhdr) 397 { 398 uint8_t *discovery_bin = adev->discovery.bin; 399 struct table_info *info; 400 uint16_t checksum; 401 uint16_t offset; 402 403 info = &bhdr->table_list[NPS_INFO]; 404 offset = le16_to_cpu(info->offset); 405 checksum = le16_to_cpu(info->checksum); 406 407 struct nps_info_header *nhdr = 408 (struct nps_info_header *)(discovery_bin + offset); 409 410 if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) { 411 dev_dbg(adev->dev, "invalid ip discovery nps info table id\n"); 412 return -EINVAL; 413 } 414 415 if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, 416 le32_to_cpu(nhdr->size_bytes), 417 checksum)) { 418 dev_dbg(adev->dev, "invalid nps info data table checksum\n"); 419 return -EINVAL; 420 } 421 422 return 0; 423 } 424 425 static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev) 426 { 427 if (amdgpu_discovery == 2) { 428 /* Assume there is valid discovery TMR in VRAM even if binary is sideloaded */ 429 adev->discovery.reserve_tmr = true; 430 return "amdgpu/ip_discovery.bin"; 431 } 432 433 switch (adev->asic_type) { 434 case CHIP_VEGA10: 435 return "amdgpu/vega10_ip_discovery.bin"; 436 case CHIP_VEGA12: 437 return "amdgpu/vega12_ip_discovery.bin"; 438 case CHIP_RAVEN: 439 if (adev->apu_flags & AMD_APU_IS_RAVEN2) 440 return "amdgpu/raven2_ip_discovery.bin"; 441 else if (adev->apu_flags & AMD_APU_IS_PICASSO) 442 return "amdgpu/picasso_ip_discovery.bin"; 443 else 444 return "amdgpu/raven_ip_discovery.bin"; 445 case CHIP_VEGA20: 446 return "amdgpu/vega20_ip_discovery.bin"; 447 case CHIP_ARCTURUS: 448 return "amdgpu/arcturus_ip_discovery.bin"; 449 case CHIP_ALDEBARAN: 450 return "amdgpu/aldebaran_ip_discovery.bin"; 451 default: 452 return NULL; 453 } 454 } 455 456 static int amdgpu_discovery_init(struct amdgpu_device *adev) 457 { 458 struct table_info *info; 459 struct binary_header *bhdr; 460 uint8_t *discovery_bin; 461 const char *fw_name; 462 uint16_t offset; 463 uint16_t size; 464 uint16_t checksum; 465 int r; 466 467 adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL); 468 if (!adev->discovery.bin) 469 return -ENOMEM; 470 adev->discovery.size = DISCOVERY_TMR_SIZE; 471 adev->discovery.debugfs_blob.data = adev->discovery.bin; 472 adev->discovery.debugfs_blob.size = adev->discovery.size; 473 474 discovery_bin = adev->discovery.bin; 475 /* Read from file if it is the preferred option */ 476 fw_name = amdgpu_discovery_get_fw_name(adev); 477 if (fw_name != NULL) { 478 drm_dbg(&adev->ddev, "use ip discovery information from file"); 479 r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin, 480 fw_name); 481 if (r) 482 goto out; 483 } else { 484 drm_dbg(&adev->ddev, "use ip discovery information from memory"); 485 r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin); 486 if (r) 487 goto out; 488 } 489 490 /* check the ip discovery binary signature */ 491 if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) { 492 dev_err(adev->dev, 493 "get invalid ip discovery binary 
signature\n"); 494 r = -EINVAL; 495 goto out; 496 } 497 498 bhdr = (struct binary_header *)discovery_bin; 499 500 offset = offsetof(struct binary_header, binary_checksum) + 501 sizeof(bhdr->binary_checksum); 502 size = le16_to_cpu(bhdr->binary_size) - offset; 503 checksum = le16_to_cpu(bhdr->binary_checksum); 504 505 if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size, 506 checksum)) { 507 dev_err(adev->dev, "invalid ip discovery binary checksum\n"); 508 r = -EINVAL; 509 goto out; 510 } 511 512 info = &bhdr->table_list[IP_DISCOVERY]; 513 offset = le16_to_cpu(info->offset); 514 checksum = le16_to_cpu(info->checksum); 515 516 if (offset) { 517 struct ip_discovery_header *ihdr = 518 (struct ip_discovery_header *)(discovery_bin + offset); 519 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { 520 dev_err(adev->dev, "invalid ip discovery data table signature\n"); 521 r = -EINVAL; 522 goto out; 523 } 524 525 if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, 526 le16_to_cpu(ihdr->size), 527 checksum)) { 528 dev_err(adev->dev, "invalid ip discovery data table checksum\n"); 529 r = -EINVAL; 530 goto out; 531 } 532 } 533 534 info = &bhdr->table_list[GC]; 535 offset = le16_to_cpu(info->offset); 536 checksum = le16_to_cpu(info->checksum); 537 538 if (offset) { 539 struct gpu_info_header *ghdr = 540 (struct gpu_info_header *)(discovery_bin + offset); 541 542 if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) { 543 dev_err(adev->dev, "invalid ip discovery gc table id\n"); 544 r = -EINVAL; 545 goto out; 546 } 547 548 if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, 549 le32_to_cpu(ghdr->size), 550 checksum)) { 551 dev_err(adev->dev, "invalid gc data table checksum\n"); 552 r = -EINVAL; 553 goto out; 554 } 555 } 556 557 info = &bhdr->table_list[HARVEST_INFO]; 558 offset = le16_to_cpu(info->offset); 559 checksum = le16_to_cpu(info->checksum); 560 561 if (offset) { 562 struct harvest_info_header *hhdr = 563 (struct harvest_info_header *)(discovery_bin + offset); 564 565 if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) { 566 dev_err(adev->dev, "invalid ip discovery harvest table signature\n"); 567 r = -EINVAL; 568 goto out; 569 } 570 571 if (!amdgpu_discovery_verify_checksum( 572 discovery_bin + offset, 573 sizeof(struct harvest_table), checksum)) { 574 dev_err(adev->dev, "invalid harvest data table checksum\n"); 575 r = -EINVAL; 576 goto out; 577 } 578 } 579 580 info = &bhdr->table_list[VCN_INFO]; 581 offset = le16_to_cpu(info->offset); 582 checksum = le16_to_cpu(info->checksum); 583 584 if (offset) { 585 struct vcn_info_header *vhdr = 586 (struct vcn_info_header *)(discovery_bin + offset); 587 588 if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) { 589 dev_err(adev->dev, "invalid ip discovery vcn table id\n"); 590 r = -EINVAL; 591 goto out; 592 } 593 594 if (!amdgpu_discovery_verify_checksum( 595 discovery_bin + offset, 596 le32_to_cpu(vhdr->size_bytes), checksum)) { 597 dev_err(adev->dev, "invalid vcn data table checksum\n"); 598 r = -EINVAL; 599 goto out; 600 } 601 } 602 603 info = &bhdr->table_list[MALL_INFO]; 604 offset = le16_to_cpu(info->offset); 605 checksum = le16_to_cpu(info->checksum); 606 607 if (0 && offset) { 608 struct mall_info_header *mhdr = 609 (struct mall_info_header *)(discovery_bin + offset); 610 611 if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) { 612 dev_err(adev->dev, "invalid ip discovery mall table id\n"); 613 r = -EINVAL; 614 goto out; 615 } 616 617 if (!amdgpu_discovery_verify_checksum( 618 discovery_bin 
+ offset, 619 le32_to_cpu(mhdr->size_bytes), checksum)) { 620 dev_err(adev->dev, "invalid mall data table checksum\n"); 621 r = -EINVAL; 622 goto out; 623 } 624 } 625 626 return 0; 627 628 out: 629 kfree(adev->discovery.bin); 630 adev->discovery.bin = NULL; 631 if ((amdgpu_discovery != 2) && 632 (RREG32(mmIP_DISCOVERY_VERSION) == 4)) 633 amdgpu_ras_query_boot_status(adev, 4); 634 return r; 635 } 636 637 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev); 638 639 void amdgpu_discovery_fini(struct amdgpu_device *adev) 640 { 641 amdgpu_discovery_sysfs_fini(adev); 642 kfree(adev->discovery.bin); 643 adev->discovery.bin = NULL; 644 } 645 646 static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev, 647 uint8_t instance, uint16_t hw_id) 648 { 649 if (instance >= HWIP_MAX_INSTANCE) { 650 dev_err(adev->dev, 651 "Unexpected instance_number (%d) from ip discovery blob\n", 652 instance); 653 return -EINVAL; 654 } 655 if (hw_id >= HW_ID_MAX) { 656 dev_err(adev->dev, 657 "Unexpected hw_id (%d) from ip discovery blob\n", 658 hw_id); 659 return -EINVAL; 660 } 661 662 return 0; 663 } 664 665 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, 666 uint32_t *vcn_harvest_count) 667 { 668 uint8_t *discovery_bin = adev->discovery.bin; 669 struct binary_header *bhdr; 670 struct ip_discovery_header *ihdr; 671 struct die_header *dhdr; 672 struct ip *ip; 673 uint16_t die_offset, ip_offset, num_dies, num_ips; 674 uint16_t hw_id; 675 uint8_t inst; 676 int i, j; 677 678 bhdr = (struct binary_header *)discovery_bin; 679 ihdr = (struct ip_discovery_header 680 *)(discovery_bin + 681 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 682 num_dies = le16_to_cpu(ihdr->num_dies); 683 684 /* scan harvest bit of all IP data structures */ 685 for (i = 0; i < num_dies; i++) { 686 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 687 dhdr = (struct die_header *)(discovery_bin + die_offset); 688 num_ips = le16_to_cpu(dhdr->num_ips); 689 ip_offset = die_offset + sizeof(*dhdr); 690 691 for (j = 0; j < num_ips; j++) { 692 ip = (struct ip *)(discovery_bin + ip_offset); 693 inst = ip->number_instance; 694 hw_id = le16_to_cpu(ip->hw_id); 695 if (amdgpu_discovery_validate_ip(adev, inst, hw_id)) 696 goto next_ip; 697 698 if (ip->harvest == 1) { 699 switch (hw_id) { 700 case VCN_HWID: 701 (*vcn_harvest_count)++; 702 if (inst == 0) { 703 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; 704 adev->vcn.inst_mask &= 705 ~AMDGPU_VCN_HARVEST_VCN0; 706 adev->jpeg.inst_mask &= 707 ~AMDGPU_VCN_HARVEST_VCN0; 708 } else { 709 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 710 adev->vcn.inst_mask &= 711 ~AMDGPU_VCN_HARVEST_VCN1; 712 adev->jpeg.inst_mask &= 713 ~AMDGPU_VCN_HARVEST_VCN1; 714 } 715 break; 716 case DMU_HWID: 717 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 718 break; 719 default: 720 break; 721 } 722 } 723 next_ip: 724 ip_offset += struct_size(ip, base_address, 725 ip->num_base_address); 726 } 727 } 728 } 729 730 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, 731 uint32_t *vcn_harvest_count, 732 uint32_t *umc_harvest_count) 733 { 734 uint8_t *discovery_bin = adev->discovery.bin; 735 struct binary_header *bhdr; 736 struct harvest_table *harvest_info; 737 u16 offset; 738 int i; 739 uint32_t umc_harvest_config = 0; 740 741 bhdr = (struct binary_header *)discovery_bin; 742 offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset); 743 744 if (!offset) { 745 dev_err(adev->dev, "invalid harvest table offset\n"); 746 return; 747 } 
748 749 harvest_info = (struct harvest_table *)(discovery_bin + offset); 750 751 for (i = 0; i < 32; i++) { 752 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) 753 break; 754 755 switch (le16_to_cpu(harvest_info->list[i].hw_id)) { 756 case VCN_HWID: 757 (*vcn_harvest_count)++; 758 adev->vcn.harvest_config |= 759 (1 << harvest_info->list[i].number_instance); 760 adev->jpeg.harvest_config |= 761 (1 << harvest_info->list[i].number_instance); 762 763 adev->vcn.inst_mask &= 764 ~(1U << harvest_info->list[i].number_instance); 765 adev->jpeg.inst_mask &= 766 ~(1U << harvest_info->list[i].number_instance); 767 break; 768 case DMU_HWID: 769 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 770 break; 771 case UMC_HWID: 772 umc_harvest_config |= 773 1 << (le16_to_cpu(harvest_info->list[i].number_instance)); 774 (*umc_harvest_count)++; 775 break; 776 case GC_HWID: 777 adev->gfx.xcc_mask &= 778 ~(1U << harvest_info->list[i].number_instance); 779 break; 780 case SDMA0_HWID: 781 adev->sdma.sdma_mask &= 782 ~(1U << harvest_info->list[i].number_instance); 783 break; 784 #if defined(CONFIG_DRM_AMD_ISP) 785 case ISP_HWID: 786 adev->isp.harvest_config |= 787 ~(1U << harvest_info->list[i].number_instance); 788 break; 789 #endif 790 default: 791 break; 792 } 793 } 794 795 adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) & 796 ~umc_harvest_config; 797 } 798 799 /* ================================================== */ 800 801 struct ip_hw_instance { 802 struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */ 803 804 int hw_id; 805 u8 num_instance; 806 u8 major, minor, revision; 807 u8 harvest; 808 809 int num_base_addresses; 810 u32 base_addr[] __counted_by(num_base_addresses); 811 }; 812 813 struct ip_hw_id { 814 struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */ 815 int hw_id; 816 }; 817 818 struct ip_die_entry { 819 struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */ 820 u16 num_ips; 821 }; 822 823 /* -------------------------------------------------- */ 824 825 struct ip_hw_instance_attr { 826 struct attribute attr; 827 ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf); 828 }; 829 830 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf) 831 { 832 return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id); 833 } 834 835 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf) 836 { 837 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance); 838 } 839 840 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf) 841 { 842 return sysfs_emit(buf, "%d\n", ip_hw_instance->major); 843 } 844 845 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf) 846 { 847 return sysfs_emit(buf, "%d\n", ip_hw_instance->minor); 848 } 849 850 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf) 851 { 852 return sysfs_emit(buf, "%d\n", ip_hw_instance->revision); 853 } 854 855 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf) 856 { 857 return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest); 858 } 859 860 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf) 861 { 862 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses); 863 } 864 865 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf) 866 { 867 ssize_t res, at; 868 int ii; 869 870 for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) { 871 /* 
Here we satisfy the condition that, at + size <= PAGE_SIZE. 872 */ 873 if (at + 12 > PAGE_SIZE) 874 break; 875 res = sysfs_emit_at(buf, at, "0x%08X\n", 876 ip_hw_instance->base_addr[ii]); 877 if (res <= 0) 878 break; 879 at += res; 880 } 881 882 return res < 0 ? res : at; 883 } 884 885 static struct ip_hw_instance_attr ip_hw_attr[] = { 886 __ATTR_RO(hw_id), 887 __ATTR_RO(num_instance), 888 __ATTR_RO(major), 889 __ATTR_RO(minor), 890 __ATTR_RO(revision), 891 __ATTR_RO(harvest), 892 __ATTR_RO(num_base_addresses), 893 __ATTR_RO(base_addr), 894 }; 895 896 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1]; 897 ATTRIBUTE_GROUPS(ip_hw_instance); 898 899 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj) 900 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr) 901 902 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj, 903 struct attribute *attr, 904 char *buf) 905 { 906 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj); 907 struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr); 908 909 if (!ip_hw_attr->show) 910 return -EIO; 911 912 return ip_hw_attr->show(ip_hw_instance, buf); 913 } 914 915 static const struct sysfs_ops ip_hw_instance_sysfs_ops = { 916 .show = ip_hw_instance_attr_show, 917 }; 918 919 static void ip_hw_instance_release(struct kobject *kobj) 920 { 921 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj); 922 923 kfree(ip_hw_instance); 924 } 925 926 static const struct kobj_type ip_hw_instance_ktype = { 927 .release = ip_hw_instance_release, 928 .sysfs_ops = &ip_hw_instance_sysfs_ops, 929 .default_groups = ip_hw_instance_groups, 930 }; 931 932 /* -------------------------------------------------- */ 933 934 #define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset) 935 936 static void ip_hw_id_release(struct kobject *kobj) 937 { 938 struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj); 939 940 if (!list_empty(&ip_hw_id->hw_id_kset.list)) 941 DRM_ERROR("ip_hw_id->hw_id_kset is not empty"); 942 kfree(ip_hw_id); 943 } 944 945 static const struct kobj_type ip_hw_id_ktype = { 946 .release = ip_hw_id_release, 947 .sysfs_ops = &kobj_sysfs_ops, 948 }; 949 950 /* -------------------------------------------------- */ 951 952 static void die_kobj_release(struct kobject *kobj); 953 static void ip_disc_release(struct kobject *kobj); 954 955 struct ip_die_entry_attribute { 956 struct attribute attr; 957 ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf); 958 }; 959 960 #define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr) 961 962 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf) 963 { 964 return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips); 965 } 966 967 /* If there are more ip_die_entry attrs, other than the number of IPs, 968 * we can make this intro an array of attrs, and then initialize 969 * ip_die_entry_attrs in a loop. 
970 */ 971 static struct ip_die_entry_attribute num_ips_attr = 972 __ATTR_RO(num_ips); 973 974 static struct attribute *ip_die_entry_attrs[] = { 975 &num_ips_attr.attr, 976 NULL, 977 }; 978 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */ 979 980 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset) 981 982 static ssize_t ip_die_entry_attr_show(struct kobject *kobj, 983 struct attribute *attr, 984 char *buf) 985 { 986 struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr); 987 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj); 988 989 if (!ip_die_entry_attr->show) 990 return -EIO; 991 992 return ip_die_entry_attr->show(ip_die_entry, buf); 993 } 994 995 static void ip_die_entry_release(struct kobject *kobj) 996 { 997 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj); 998 999 if (!list_empty(&ip_die_entry->ip_kset.list)) 1000 DRM_ERROR("ip_die_entry->ip_kset is not empty"); 1001 kfree(ip_die_entry); 1002 } 1003 1004 static const struct sysfs_ops ip_die_entry_sysfs_ops = { 1005 .show = ip_die_entry_attr_show, 1006 }; 1007 1008 static const struct kobj_type ip_die_entry_ktype = { 1009 .release = ip_die_entry_release, 1010 .sysfs_ops = &ip_die_entry_sysfs_ops, 1011 .default_groups = ip_die_entry_groups, 1012 }; 1013 1014 static const struct kobj_type die_kobj_ktype = { 1015 .release = die_kobj_release, 1016 .sysfs_ops = &kobj_sysfs_ops, 1017 }; 1018 1019 static const struct kobj_type ip_discovery_ktype = { 1020 .release = ip_disc_release, 1021 .sysfs_ops = &kobj_sysfs_ops, 1022 }; 1023 1024 struct ip_discovery_top { 1025 struct kobject kobj; /* ip_discovery/ */ 1026 struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */ 1027 struct amdgpu_device *adev; 1028 }; 1029 1030 static void die_kobj_release(struct kobject *kobj) 1031 { 1032 struct ip_discovery_top *ip_top = container_of(to_kset(kobj), 1033 struct ip_discovery_top, 1034 die_kset); 1035 if (!list_empty(&ip_top->die_kset.list)) 1036 DRM_ERROR("ip_top->die_kset is not empty"); 1037 } 1038 1039 static void ip_disc_release(struct kobject *kobj) 1040 { 1041 struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top, 1042 kobj); 1043 struct amdgpu_device *adev = ip_top->adev; 1044 1045 kfree(ip_top); 1046 adev->discovery.ip_top = NULL; 1047 } 1048 1049 static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev, 1050 uint16_t hw_id, uint8_t inst) 1051 { 1052 uint8_t harvest = 0; 1053 1054 /* Until a uniform way is figured, get mask based on hwid */ 1055 switch (hw_id) { 1056 case VCN_HWID: 1057 /* VCN vs UVD+VCE */ 1058 if (!amdgpu_ip_version(adev, VCE_HWIP, 0)) 1059 harvest = ((1 << inst) & adev->vcn.inst_mask) == 0; 1060 break; 1061 case DMU_HWID: 1062 if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK) 1063 harvest = 0x1; 1064 break; 1065 case UMC_HWID: 1066 /* TODO: It needs another parsing; for now, ignore.*/ 1067 break; 1068 case GC_HWID: 1069 harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0; 1070 break; 1071 case SDMA0_HWID: 1072 harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0; 1073 break; 1074 default: 1075 break; 1076 } 1077 1078 return harvest; 1079 } 1080 1081 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, 1082 struct ip_die_entry *ip_die_entry, 1083 const size_t _ip_offset, const int num_ips, 1084 bool reg_base_64) 1085 { 1086 uint8_t *discovery_bin = adev->discovery.bin; 1087 int ii, jj, kk, res; 1088 uint16_t hw_id; 1089 uint8_t inst; 1090 1091 DRM_DEBUG("num_ips:%d", num_ips); 1092 
1093 /* Find all IPs of a given HW ID, and add their instance to 1094 * #die/#hw_id/#instance/<attributes> 1095 */ 1096 for (ii = 0; ii < HW_ID_MAX; ii++) { 1097 struct ip_hw_id *ip_hw_id = NULL; 1098 size_t ip_offset = _ip_offset; 1099 1100 for (jj = 0; jj < num_ips; jj++) { 1101 struct ip_v4 *ip; 1102 struct ip_hw_instance *ip_hw_instance; 1103 1104 ip = (struct ip_v4 *)(discovery_bin + ip_offset); 1105 inst = ip->instance_number; 1106 hw_id = le16_to_cpu(ip->hw_id); 1107 if (amdgpu_discovery_validate_ip(adev, inst, hw_id) || 1108 hw_id != ii) 1109 goto next_ip; 1110 1111 DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset); 1112 1113 /* We have a hw_id match; register the hw 1114 * block if not yet registered. 1115 */ 1116 if (!ip_hw_id) { 1117 ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL); 1118 if (!ip_hw_id) 1119 return -ENOMEM; 1120 ip_hw_id->hw_id = ii; 1121 1122 kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii); 1123 ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset; 1124 ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype; 1125 res = kset_register(&ip_hw_id->hw_id_kset); 1126 if (res) { 1127 DRM_ERROR("Couldn't register ip_hw_id kset"); 1128 kfree(ip_hw_id); 1129 return res; 1130 } 1131 if (hw_id_names[ii]) { 1132 res = sysfs_create_link(&ip_die_entry->ip_kset.kobj, 1133 &ip_hw_id->hw_id_kset.kobj, 1134 hw_id_names[ii]); 1135 if (res) { 1136 DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n", 1137 hw_id_names[ii], 1138 kobject_name(&ip_die_entry->ip_kset.kobj)); 1139 } 1140 } 1141 } 1142 1143 /* Now register its instance. 1144 */ 1145 ip_hw_instance = kzalloc(struct_size(ip_hw_instance, 1146 base_addr, 1147 ip->num_base_address), 1148 GFP_KERNEL); 1149 if (!ip_hw_instance) { 1150 DRM_ERROR("no memory for ip_hw_instance"); 1151 return -ENOMEM; 1152 } 1153 ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */ 1154 ip_hw_instance->num_instance = ip->instance_number; 1155 ip_hw_instance->major = ip->major; 1156 ip_hw_instance->minor = ip->minor; 1157 ip_hw_instance->revision = ip->revision; 1158 ip_hw_instance->harvest = 1159 amdgpu_discovery_get_harvest_info( 1160 adev, ip_hw_instance->hw_id, 1161 ip_hw_instance->num_instance); 1162 ip_hw_instance->num_base_addresses = ip->num_base_address; 1163 1164 for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) { 1165 if (reg_base_64) 1166 ip_hw_instance->base_addr[kk] = 1167 lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF; 1168 else 1169 ip_hw_instance->base_addr[kk] = ip->base_address[kk]; 1170 } 1171 1172 kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype); 1173 ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset; 1174 res = kobject_add(&ip_hw_instance->kobj, NULL, 1175 "%d", ip_hw_instance->num_instance); 1176 next_ip: 1177 if (reg_base_64) 1178 ip_offset += struct_size(ip, base_address_64, 1179 ip->num_base_address); 1180 else 1181 ip_offset += struct_size(ip, base_address, 1182 ip->num_base_address); 1183 } 1184 } 1185 1186 return 0; 1187 } 1188 1189 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) 1190 { 1191 struct ip_discovery_top *ip_top = adev->discovery.ip_top; 1192 uint8_t *discovery_bin = adev->discovery.bin; 1193 struct binary_header *bhdr; 1194 struct ip_discovery_header *ihdr; 1195 struct die_header *dhdr; 1196 struct kset *die_kset = &ip_top->die_kset; 1197 u16 num_dies, die_offset, num_ips; 1198 size_t ip_offset; 1199 int ii, res; 1200 1201 bhdr = (struct binary_header *)discovery_bin; 1202 ihdr = (struct ip_discovery_header 1203 *)(discovery_bin + 1204 
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 1205 num_dies = le16_to_cpu(ihdr->num_dies); 1206 1207 DRM_DEBUG("number of dies: %d\n", num_dies); 1208 1209 for (ii = 0; ii < num_dies; ii++) { 1210 struct ip_die_entry *ip_die_entry; 1211 1212 die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset); 1213 dhdr = (struct die_header *)(discovery_bin + die_offset); 1214 num_ips = le16_to_cpu(dhdr->num_ips); 1215 ip_offset = die_offset + sizeof(*dhdr); 1216 1217 /* Add the die to the kset. 1218 * 1219 * dhdr->die_id == ii, which was checked in 1220 * amdgpu_discovery_reg_base_init(). 1221 */ 1222 1223 ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL); 1224 if (!ip_die_entry) 1225 return -ENOMEM; 1226 1227 ip_die_entry->num_ips = num_ips; 1228 1229 kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id)); 1230 ip_die_entry->ip_kset.kobj.kset = die_kset; 1231 ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype; 1232 res = kset_register(&ip_die_entry->ip_kset); 1233 if (res) { 1234 DRM_ERROR("Couldn't register ip_die_entry kset"); 1235 kfree(ip_die_entry); 1236 return res; 1237 } 1238 1239 amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit); 1240 } 1241 1242 return 0; 1243 } 1244 1245 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev) 1246 { 1247 uint8_t *discovery_bin = adev->discovery.bin; 1248 struct ip_discovery_top *ip_top; 1249 struct kset *die_kset; 1250 int res, ii; 1251 1252 if (!discovery_bin) 1253 return -EINVAL; 1254 1255 ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL); 1256 if (!ip_top) 1257 return -ENOMEM; 1258 1259 ip_top->adev = adev; 1260 adev->discovery.ip_top = ip_top; 1261 res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype, 1262 &adev->dev->kobj, "ip_discovery"); 1263 if (res) { 1264 DRM_ERROR("Couldn't init and add ip_discovery/"); 1265 goto Err; 1266 } 1267 1268 die_kset = &ip_top->die_kset; 1269 kobject_set_name(&die_kset->kobj, "%s", "die"); 1270 die_kset->kobj.parent = &ip_top->kobj; 1271 die_kset->kobj.ktype = &die_kobj_ktype; 1272 res = kset_register(&ip_top->die_kset); 1273 if (res) { 1274 DRM_ERROR("Couldn't register die_kset"); 1275 goto Err; 1276 } 1277 1278 for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++) 1279 ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr; 1280 ip_hw_instance_attrs[ii] = NULL; 1281 1282 res = amdgpu_discovery_sysfs_recurse(adev); 1283 1284 return res; 1285 Err: 1286 kobject_put(&ip_top->kobj); 1287 return res; 1288 } 1289 1290 /* -------------------------------------------------- */ 1291 1292 #define list_to_kobj(el) container_of(el, struct kobject, entry) 1293 1294 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id) 1295 { 1296 struct list_head *el, *tmp; 1297 struct kset *hw_id_kset; 1298 1299 hw_id_kset = &ip_hw_id->hw_id_kset; 1300 spin_lock(&hw_id_kset->list_lock); 1301 list_for_each_prev_safe(el, tmp, &hw_id_kset->list) { 1302 list_del_init(el); 1303 spin_unlock(&hw_id_kset->list_lock); 1304 /* kobject is embedded in ip_hw_instance */ 1305 kobject_put(list_to_kobj(el)); 1306 spin_lock(&hw_id_kset->list_lock); 1307 } 1308 spin_unlock(&hw_id_kset->list_lock); 1309 kobject_put(&ip_hw_id->hw_id_kset.kobj); 1310 } 1311 1312 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry) 1313 { 1314 struct list_head *el, *tmp; 1315 struct kset *ip_kset; 1316 1317 ip_kset = &ip_die_entry->ip_kset; 1318 spin_lock(&ip_kset->list_lock); 1319 list_for_each_prev_safe(el, tmp, &ip_kset->list) { 1320 list_del_init(el); 
1321 spin_unlock(&ip_kset->list_lock); 1322 amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el))); 1323 spin_lock(&ip_kset->list_lock); 1324 } 1325 spin_unlock(&ip_kset->list_lock); 1326 kobject_put(&ip_die_entry->ip_kset.kobj); 1327 } 1328 1329 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev) 1330 { 1331 struct ip_discovery_top *ip_top = adev->discovery.ip_top; 1332 struct list_head *el, *tmp; 1333 struct kset *die_kset; 1334 1335 die_kset = &ip_top->die_kset; 1336 spin_lock(&die_kset->list_lock); 1337 list_for_each_prev_safe(el, tmp, &die_kset->list) { 1338 list_del_init(el); 1339 spin_unlock(&die_kset->list_lock); 1340 amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el))); 1341 spin_lock(&die_kset->list_lock); 1342 } 1343 spin_unlock(&die_kset->list_lock); 1344 kobject_put(&ip_top->die_kset.kobj); 1345 kobject_put(&ip_top->kobj); 1346 } 1347 1348 /* ================================================== */ 1349 1350 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) 1351 { 1352 uint8_t num_base_address, subrev, variant; 1353 struct binary_header *bhdr; 1354 struct ip_discovery_header *ihdr; 1355 struct die_header *dhdr; 1356 uint8_t *discovery_bin; 1357 struct ip_v4 *ip; 1358 uint16_t die_offset; 1359 uint16_t ip_offset; 1360 uint16_t num_dies; 1361 uint32_t wafl_ver; 1362 uint16_t num_ips; 1363 uint16_t hw_id; 1364 uint8_t inst; 1365 int hw_ip; 1366 int i, j, k; 1367 int r; 1368 1369 r = amdgpu_discovery_init(adev); 1370 if (r) 1371 return r; 1372 discovery_bin = adev->discovery.bin; 1373 wafl_ver = 0; 1374 adev->gfx.xcc_mask = 0; 1375 adev->sdma.sdma_mask = 0; 1376 adev->vcn.inst_mask = 0; 1377 adev->jpeg.inst_mask = 0; 1378 bhdr = (struct binary_header *)discovery_bin; 1379 ihdr = (struct ip_discovery_header 1380 *)(discovery_bin + 1381 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 1382 num_dies = le16_to_cpu(ihdr->num_dies); 1383 1384 DRM_DEBUG("number of dies: %d\n", num_dies); 1385 1386 for (i = 0; i < num_dies; i++) { 1387 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 1388 dhdr = (struct die_header *)(discovery_bin + die_offset); 1389 num_ips = le16_to_cpu(dhdr->num_ips); 1390 ip_offset = die_offset + sizeof(*dhdr); 1391 1392 if (le16_to_cpu(dhdr->die_id) != i) { 1393 DRM_ERROR("invalid die id %d, expected %d\n", 1394 le16_to_cpu(dhdr->die_id), i); 1395 return -EINVAL; 1396 } 1397 1398 DRM_DEBUG("number of hardware IPs on die%d: %d\n", 1399 le16_to_cpu(dhdr->die_id), num_ips); 1400 1401 for (j = 0; j < num_ips; j++) { 1402 ip = (struct ip_v4 *)(discovery_bin + ip_offset); 1403 1404 inst = ip->instance_number; 1405 hw_id = le16_to_cpu(ip->hw_id); 1406 if (amdgpu_discovery_validate_ip(adev, inst, hw_id)) 1407 goto next_ip; 1408 1409 num_base_address = ip->num_base_address; 1410 1411 DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n", 1412 hw_id_names[le16_to_cpu(ip->hw_id)], 1413 le16_to_cpu(ip->hw_id), 1414 ip->instance_number, 1415 ip->major, ip->minor, 1416 ip->revision); 1417 1418 if (le16_to_cpu(ip->hw_id) == VCN_HWID) { 1419 /* Bit [5:0]: original revision value 1420 * Bit [7:6]: en/decode capability: 1421 * 0b00 : VCN function normally 1422 * 0b10 : encode is disabled 1423 * 0b01 : decode is disabled 1424 */ 1425 if (adev->vcn.num_vcn_inst < 1426 AMDGPU_MAX_VCN_INSTANCES) { 1427 adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config = 1428 ip->revision & 0xc0; 1429 adev->vcn.num_vcn_inst++; 1430 adev->vcn.inst_mask |= 1431 (1U << ip->instance_number); 1432 adev->jpeg.inst_mask |= 1433 (1U << ip->instance_number); 1434 } 
else { 1435 dev_err(adev->dev, "Too many VCN instances: %d vs %d\n", 1436 adev->vcn.num_vcn_inst + 1, 1437 AMDGPU_MAX_VCN_INSTANCES); 1438 } 1439 ip->revision &= ~0xc0; 1440 } 1441 if (le16_to_cpu(ip->hw_id) == SDMA0_HWID || 1442 le16_to_cpu(ip->hw_id) == SDMA1_HWID || 1443 le16_to_cpu(ip->hw_id) == SDMA2_HWID || 1444 le16_to_cpu(ip->hw_id) == SDMA3_HWID) { 1445 if (adev->sdma.num_instances < 1446 AMDGPU_MAX_SDMA_INSTANCES) { 1447 adev->sdma.num_instances++; 1448 adev->sdma.sdma_mask |= 1449 (1U << ip->instance_number); 1450 } else { 1451 dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n", 1452 adev->sdma.num_instances + 1, 1453 AMDGPU_MAX_SDMA_INSTANCES); 1454 } 1455 } 1456 1457 if (le16_to_cpu(ip->hw_id) == VPE_HWID) { 1458 if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES) 1459 adev->vpe.num_instances++; 1460 else 1461 dev_err(adev->dev, "Too many VPE instances: %d vs %d\n", 1462 adev->vpe.num_instances + 1, 1463 AMDGPU_MAX_VPE_INSTANCES); 1464 } 1465 1466 if (le16_to_cpu(ip->hw_id) == UMC_HWID) { 1467 adev->gmc.num_umc++; 1468 adev->umc.node_inst_num++; 1469 } 1470 1471 if (le16_to_cpu(ip->hw_id) == GC_HWID) 1472 adev->gfx.xcc_mask |= 1473 (1U << ip->instance_number); 1474 1475 if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID) 1476 wafl_ver = IP_VERSION_FULL(ip->major, ip->minor, 1477 ip->revision, 0, 0); 1478 1479 for (k = 0; k < num_base_address; k++) { 1480 /* 1481 * convert the endianness of base addresses in place, 1482 * so that we don't need to convert them when accessing adev->reg_offset. 1483 */ 1484 if (ihdr->base_addr_64_bit) 1485 /* Truncate the 64bit base address from ip discovery 1486 * and only store lower 32bit ip base in reg_offset[]. 1487 * Bits > 32 follows ASIC specific format, thus just 1488 * discard them and handle it within specific ASIC. 1489 * By this way reg_offset[] and related helpers can 1490 * stay unchanged. 1491 * The base address is in dwords, thus clear the 1492 * highest 2 bits to store. 1493 */ 1494 ip->base_address[k] = 1495 lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF; 1496 else 1497 ip->base_address[k] = le32_to_cpu(ip->base_address[k]); 1498 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]); 1499 } 1500 1501 for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) { 1502 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) && 1503 hw_id_map[hw_ip] != 0) { 1504 DRM_DEBUG("set register base offset for %s\n", 1505 hw_id_names[le16_to_cpu(ip->hw_id)]); 1506 adev->reg_offset[hw_ip][ip->instance_number] = 1507 ip->base_address; 1508 /* Instance support is somewhat inconsistent. 1509 * SDMA is a good example. Sienna cichlid has 4 total 1510 * SDMA instances, each enumerated separately (HWIDs 1511 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances, 1512 * but they are enumerated as multiple instances of the 1513 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another 1514 * example. On most chips there are multiple instances 1515 * with the same HWID. 
1516 */ 1517 1518 if (ihdr->version < 3) { 1519 subrev = 0; 1520 variant = 0; 1521 } else { 1522 subrev = ip->sub_revision; 1523 variant = ip->variant; 1524 } 1525 1526 adev->ip_versions[hw_ip] 1527 [ip->instance_number] = 1528 IP_VERSION_FULL(ip->major, 1529 ip->minor, 1530 ip->revision, 1531 variant, 1532 subrev); 1533 } 1534 } 1535 1536 next_ip: 1537 if (ihdr->base_addr_64_bit) 1538 ip_offset += struct_size(ip, base_address_64, ip->num_base_address); 1539 else 1540 ip_offset += struct_size(ip, base_address, ip->num_base_address); 1541 } 1542 } 1543 1544 if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0]) 1545 adev->ip_versions[XGMI_HWIP][0] = wafl_ver; 1546 1547 return 0; 1548 } 1549 1550 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) 1551 { 1552 uint8_t *discovery_bin = adev->discovery.bin; 1553 struct ip_discovery_header *ihdr; 1554 struct binary_header *bhdr; 1555 int vcn_harvest_count = 0; 1556 int umc_harvest_count = 0; 1557 uint16_t offset, ihdr_ver; 1558 1559 bhdr = (struct binary_header *)discovery_bin; 1560 offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset); 1561 ihdr = (struct ip_discovery_header *)(discovery_bin + offset); 1562 ihdr_ver = le16_to_cpu(ihdr->version); 1563 /* 1564 * Harvest table does not fit Navi1x and legacy GPUs, 1565 * so read harvest bit per IP data structure to set 1566 * harvest configuration. 1567 */ 1568 if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) && 1569 ihdr_ver <= 2) { 1570 if ((adev->pdev->device == 0x731E && 1571 (adev->pdev->revision == 0xC6 || 1572 adev->pdev->revision == 0xC7)) || 1573 (adev->pdev->device == 0x7340 && 1574 adev->pdev->revision == 0xC9) || 1575 (adev->pdev->device == 0x7360 && 1576 adev->pdev->revision == 0xC7)) 1577 amdgpu_discovery_read_harvest_bit_per_ip(adev, 1578 &vcn_harvest_count); 1579 } else { 1580 amdgpu_discovery_read_from_harvest_table(adev, 1581 &vcn_harvest_count, 1582 &umc_harvest_count); 1583 } 1584 1585 amdgpu_discovery_harvest_config_quirk(adev); 1586 1587 if (vcn_harvest_count == adev->vcn.num_vcn_inst) { 1588 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; 1589 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; 1590 } 1591 1592 if (umc_harvest_count < adev->gmc.num_umc) { 1593 adev->gmc.num_umc -= umc_harvest_count; 1594 } 1595 } 1596 1597 union gc_info { 1598 struct gc_info_v1_0 v1; 1599 struct gc_info_v1_1 v1_1; 1600 struct gc_info_v1_2 v1_2; 1601 struct gc_info_v1_3 v1_3; 1602 struct gc_info_v2_0 v2; 1603 struct gc_info_v2_1 v2_1; 1604 }; 1605 1606 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) 1607 { 1608 uint8_t *discovery_bin = adev->discovery.bin; 1609 struct binary_header *bhdr; 1610 union gc_info *gc_info; 1611 u16 offset; 1612 1613 if (!discovery_bin) { 1614 DRM_ERROR("ip discovery uninitialized\n"); 1615 return -EINVAL; 1616 } 1617 1618 bhdr = (struct binary_header *)discovery_bin; 1619 offset = le16_to_cpu(bhdr->table_list[GC].offset); 1620 1621 if (!offset) 1622 return 0; 1623 1624 gc_info = (union gc_info *)(discovery_bin + offset); 1625 1626 switch (le16_to_cpu(gc_info->v1.header.version_major)) { 1627 case 1: 1628 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se); 1629 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) + 1630 le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa)); 1631 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se); 1632 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se); 1633 
adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c); 1634 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs); 1635 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds); 1636 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth); 1637 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth); 1638 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer); 1639 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size); 1640 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd); 1641 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu); 1642 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size); 1643 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) / 1644 le32_to_cpu(gc_info->v1.gc_num_sa_per_se); 1645 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc); 1646 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) { 1647 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa); 1648 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface); 1649 adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps); 1650 } 1651 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) { 1652 adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg); 1653 adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size); 1654 adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp); 1655 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc); 1656 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc); 1657 adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa); 1658 adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance); 1659 adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu); 1660 } 1661 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) { 1662 adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu); 1663 adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size); 1664 adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc); 1665 adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size); 1666 adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc); 1667 adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size); 1668 adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size); 1669 adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size); 1670 } 1671 break; 1672 case 2: 1673 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se); 1674 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh); 1675 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se); 1676 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se); 1677 
adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs); 1678 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs); 1679 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds); 1680 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth); 1681 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth); 1682 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer); 1683 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size); 1684 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd); 1685 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu); 1686 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size); 1687 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) / 1688 le32_to_cpu(gc_info->v2.gc_num_sh_per_se); 1689 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc); 1690 if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) { 1691 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh); 1692 adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu); 1693 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */ 1694 adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc); 1695 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc); 1696 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc); 1697 adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */ 1698 } 1699 break; 1700 default: 1701 dev_err(adev->dev, 1702 "Unhandled GC info table %d.%d\n", 1703 le16_to_cpu(gc_info->v1.header.version_major), 1704 le16_to_cpu(gc_info->v1.header.version_minor)); 1705 return -EINVAL; 1706 } 1707 return 0; 1708 } 1709 1710 union mall_info { 1711 struct mall_info_v1_0 v1; 1712 struct mall_info_v2_0 v2; 1713 }; 1714 1715 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev) 1716 { 1717 uint8_t *discovery_bin = adev->discovery.bin; 1718 struct binary_header *bhdr; 1719 union mall_info *mall_info; 1720 u32 u, mall_size_per_umc, m_s_present, half_use; 1721 u64 mall_size; 1722 u16 offset; 1723 1724 if (!discovery_bin) { 1725 DRM_ERROR("ip discovery uninitialized\n"); 1726 return -EINVAL; 1727 } 1728 1729 bhdr = (struct binary_header *)discovery_bin; 1730 offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset); 1731 1732 if (!offset) 1733 return 0; 1734 1735 mall_info = (union mall_info *)(discovery_bin + offset); 1736 1737 switch (le16_to_cpu(mall_info->v1.header.version_major)) { 1738 case 1: 1739 mall_size = 0; 1740 mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m); 1741 m_s_present = le32_to_cpu(mall_info->v1.m_s_present); 1742 half_use = le32_to_cpu(mall_info->v1.m_half_use); 1743 for (u = 0; u < adev->gmc.num_umc; u++) { 1744 if (m_s_present & (1 << u)) 1745 mall_size += mall_size_per_umc * 2; 1746 else if (half_use & (1 << u)) 1747 mall_size += mall_size_per_umc / 2; 1748 else 1749 mall_size += mall_size_per_umc; 1750 } 1751 adev->gmc.mall_size = mall_size; 1752 adev->gmc.m_half_use = half_use; 1753 break; 1754 case 2: 1755 mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc); 1756 
adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc; 1757 break; 1758 default: 1759 dev_err(adev->dev, 1760 "Unhandled MALL info table %d.%d\n", 1761 le16_to_cpu(mall_info->v1.header.version_major), 1762 le16_to_cpu(mall_info->v1.header.version_minor)); 1763 return -EINVAL; 1764 } 1765 return 0; 1766 } 1767 1768 union vcn_info { 1769 struct vcn_info_v1_0 v1; 1770 }; 1771 1772 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) 1773 { 1774 uint8_t *discovery_bin = adev->discovery.bin; 1775 struct binary_header *bhdr; 1776 union vcn_info *vcn_info; 1777 u16 offset; 1778 int v; 1779 1780 if (!discovery_bin) { 1781 DRM_ERROR("ip discovery uninitialized\n"); 1782 return -EINVAL; 1783 } 1784 1785 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES 1786 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES 1787 * but that may change in the future with new GPUs so keep this 1788 * check for defensive purposes. 1789 */ 1790 if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) { 1791 dev_err(adev->dev, "invalid vcn instances\n"); 1792 return -EINVAL; 1793 } 1794 1795 bhdr = (struct binary_header *)discovery_bin; 1796 offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset); 1797 1798 if (!offset) 1799 return 0; 1800 1801 vcn_info = (union vcn_info *)(discovery_bin + offset); 1802 1803 switch (le16_to_cpu(vcn_info->v1.header.version_major)) { 1804 case 1: 1805 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES 1806 * so this won't overflow. 1807 */ 1808 for (v = 0; v < adev->vcn.num_vcn_inst; v++) { 1809 adev->vcn.inst[v].vcn_codec_disable_mask = 1810 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits); 1811 } 1812 break; 1813 default: 1814 dev_err(adev->dev, 1815 "Unhandled VCN info table %d.%d\n", 1816 le16_to_cpu(vcn_info->v1.header.version_major), 1817 le16_to_cpu(vcn_info->v1.header.version_minor)); 1818 return -EINVAL; 1819 } 1820 return 0; 1821 } 1822 1823 union nps_info { 1824 struct nps_info_v1_0 v1; 1825 }; 1826 1827 static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev, 1828 union nps_info *nps_data) 1829 { 1830 uint64_t vram_size, pos, offset; 1831 struct nps_info_header *nhdr; 1832 struct binary_header bhdr; 1833 uint16_t checksum; 1834 1835 vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; 1836 pos = vram_size - DISCOVERY_TMR_OFFSET; 1837 amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false); 1838 1839 offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset); 1840 checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum); 1841 1842 amdgpu_device_vram_access(adev, (pos + offset), nps_data, 1843 sizeof(*nps_data), false); 1844 1845 nhdr = (struct nps_info_header *)(nps_data); 1846 if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data, 1847 le32_to_cpu(nhdr->size_bytes), 1848 checksum)) { 1849 dev_err(adev->dev, "nps data refresh, checksum mismatch\n"); 1850 return -EINVAL; 1851 } 1852 1853 return 0; 1854 } 1855 1856 int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, 1857 uint32_t *nps_type, 1858 struct amdgpu_gmc_memrange **ranges, 1859 int *range_cnt, bool refresh) 1860 { 1861 uint8_t *discovery_bin = adev->discovery.bin; 1862 struct amdgpu_gmc_memrange *mem_ranges; 1863 struct binary_header *bhdr; 1864 union nps_info *nps_info; 1865 union nps_info nps_data; 1866 u16 offset; 1867 int i, r; 1868 1869 if (!nps_type || !range_cnt || !ranges) 1870 return -EINVAL; 1871 1872 if (refresh) { 1873 r = amdgpu_discovery_refresh_nps_info(adev, &nps_data); 1874 if (r) 
1875 return r; 1876 nps_info = &nps_data; 1877 } else { 1878 if (!discovery_bin) { 1879 dev_err(adev->dev, 1880 "fetch mem range failed, ip discovery uninitialized\n"); 1881 return -EINVAL; 1882 } 1883 1884 bhdr = (struct binary_header *)discovery_bin; 1885 offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset); 1886 1887 if (!offset) 1888 return -ENOENT; 1889 1890 /* If verification fails, return as if NPS table doesn't exist */ 1891 if (amdgpu_discovery_verify_npsinfo(adev, bhdr)) 1892 return -ENOENT; 1893 1894 nps_info = (union nps_info *)(discovery_bin + offset); 1895 } 1896 1897 switch (le16_to_cpu(nps_info->v1.header.version_major)) { 1898 case 1: 1899 mem_ranges = kvcalloc(nps_info->v1.count, 1900 sizeof(*mem_ranges), 1901 GFP_KERNEL); 1902 if (!mem_ranges) 1903 return -ENOMEM; 1904 *nps_type = nps_info->v1.nps_type; 1905 *range_cnt = nps_info->v1.count; 1906 for (i = 0; i < *range_cnt; i++) { 1907 mem_ranges[i].base_address = 1908 nps_info->v1.instance_info[i].base_address; 1909 mem_ranges[i].limit_address = 1910 nps_info->v1.instance_info[i].limit_address; 1911 mem_ranges[i].nid_mask = -1; 1912 mem_ranges[i].flags = 0; 1913 } 1914 *ranges = mem_ranges; 1915 break; 1916 default: 1917 dev_err(adev->dev, "Unhandled NPS info table %d.%d\n", 1918 le16_to_cpu(nps_info->v1.header.version_major), 1919 le16_to_cpu(nps_info->v1.header.version_minor)); 1920 return -EINVAL; 1921 } 1922 1923 return 0; 1924 } 1925 1926 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) 1927 { 1928 /* what IP to use for this? */ 1929 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1930 case IP_VERSION(9, 0, 1): 1931 case IP_VERSION(9, 1, 0): 1932 case IP_VERSION(9, 2, 1): 1933 case IP_VERSION(9, 2, 2): 1934 case IP_VERSION(9, 3, 0): 1935 case IP_VERSION(9, 4, 0): 1936 case IP_VERSION(9, 4, 1): 1937 case IP_VERSION(9, 4, 2): 1938 case IP_VERSION(9, 4, 3): 1939 case IP_VERSION(9, 4, 4): 1940 case IP_VERSION(9, 5, 0): 1941 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); 1942 break; 1943 case IP_VERSION(10, 1, 10): 1944 case IP_VERSION(10, 1, 1): 1945 case IP_VERSION(10, 1, 2): 1946 case IP_VERSION(10, 1, 3): 1947 case IP_VERSION(10, 1, 4): 1948 case IP_VERSION(10, 3, 0): 1949 case IP_VERSION(10, 3, 1): 1950 case IP_VERSION(10, 3, 2): 1951 case IP_VERSION(10, 3, 3): 1952 case IP_VERSION(10, 3, 4): 1953 case IP_VERSION(10, 3, 5): 1954 case IP_VERSION(10, 3, 6): 1955 case IP_VERSION(10, 3, 7): 1956 amdgpu_device_ip_block_add(adev, &nv_common_ip_block); 1957 break; 1958 case IP_VERSION(11, 0, 0): 1959 case IP_VERSION(11, 0, 1): 1960 case IP_VERSION(11, 0, 2): 1961 case IP_VERSION(11, 0, 3): 1962 case IP_VERSION(11, 0, 4): 1963 case IP_VERSION(11, 5, 0): 1964 case IP_VERSION(11, 5, 1): 1965 case IP_VERSION(11, 5, 2): 1966 case IP_VERSION(11, 5, 3): 1967 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block); 1968 break; 1969 case IP_VERSION(12, 0, 0): 1970 case IP_VERSION(12, 0, 1): 1971 amdgpu_device_ip_block_add(adev, &soc24_common_ip_block); 1972 break; 1973 default: 1974 dev_err(adev->dev, 1975 "Failed to add common ip block(GC_HWIP:0x%x)\n", 1976 amdgpu_ip_version(adev, GC_HWIP, 0)); 1977 return -EINVAL; 1978 } 1979 return 0; 1980 } 1981 1982 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) 1983 { 1984 /* use GC or MMHUB IP version */ 1985 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1986 case IP_VERSION(9, 0, 1): 1987 case IP_VERSION(9, 1, 0): 1988 case IP_VERSION(9, 2, 1): 1989 case IP_VERSION(9, 2, 2): 1990 case IP_VERSION(9, 3, 0): 1991 case 
IP_VERSION(9, 4, 0): 1992 case IP_VERSION(9, 4, 1): 1993 case IP_VERSION(9, 4, 2): 1994 case IP_VERSION(9, 4, 3): 1995 case IP_VERSION(9, 4, 4): 1996 case IP_VERSION(9, 5, 0): 1997 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 1998 break; 1999 case IP_VERSION(10, 1, 10): 2000 case IP_VERSION(10, 1, 1): 2001 case IP_VERSION(10, 1, 2): 2002 case IP_VERSION(10, 1, 3): 2003 case IP_VERSION(10, 1, 4): 2004 case IP_VERSION(10, 3, 0): 2005 case IP_VERSION(10, 3, 1): 2006 case IP_VERSION(10, 3, 2): 2007 case IP_VERSION(10, 3, 3): 2008 case IP_VERSION(10, 3, 4): 2009 case IP_VERSION(10, 3, 5): 2010 case IP_VERSION(10, 3, 6): 2011 case IP_VERSION(10, 3, 7): 2012 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); 2013 break; 2014 case IP_VERSION(11, 0, 0): 2015 case IP_VERSION(11, 0, 1): 2016 case IP_VERSION(11, 0, 2): 2017 case IP_VERSION(11, 0, 3): 2018 case IP_VERSION(11, 0, 4): 2019 case IP_VERSION(11, 5, 0): 2020 case IP_VERSION(11, 5, 1): 2021 case IP_VERSION(11, 5, 2): 2022 case IP_VERSION(11, 5, 3): 2023 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block); 2024 break; 2025 case IP_VERSION(12, 0, 0): 2026 case IP_VERSION(12, 0, 1): 2027 amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block); 2028 break; 2029 default: 2030 dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n", 2031 amdgpu_ip_version(adev, GC_HWIP, 0)); 2032 return -EINVAL; 2033 } 2034 return 0; 2035 } 2036 2037 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) 2038 { 2039 switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) { 2040 case IP_VERSION(4, 0, 0): 2041 case IP_VERSION(4, 0, 1): 2042 case IP_VERSION(4, 1, 0): 2043 case IP_VERSION(4, 1, 1): 2044 case IP_VERSION(4, 3, 0): 2045 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 2046 break; 2047 case IP_VERSION(4, 2, 0): 2048 case IP_VERSION(4, 2, 1): 2049 case IP_VERSION(4, 4, 0): 2050 case IP_VERSION(4, 4, 2): 2051 case IP_VERSION(4, 4, 5): 2052 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); 2053 break; 2054 case IP_VERSION(5, 0, 0): 2055 case IP_VERSION(5, 0, 1): 2056 case IP_VERSION(5, 0, 2): 2057 case IP_VERSION(5, 0, 3): 2058 case IP_VERSION(5, 2, 0): 2059 case IP_VERSION(5, 2, 1): 2060 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); 2061 break; 2062 case IP_VERSION(6, 0, 0): 2063 case IP_VERSION(6, 0, 1): 2064 case IP_VERSION(6, 0, 2): 2065 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block); 2066 break; 2067 case IP_VERSION(6, 1, 0): 2068 amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block); 2069 break; 2070 case IP_VERSION(7, 0, 0): 2071 amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block); 2072 break; 2073 default: 2074 dev_err(adev->dev, 2075 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", 2076 amdgpu_ip_version(adev, OSSSYS_HWIP, 0)); 2077 return -EINVAL; 2078 } 2079 return 0; 2080 } 2081 2082 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) 2083 { 2084 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 2085 case IP_VERSION(9, 0, 0): 2086 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); 2087 break; 2088 case IP_VERSION(10, 0, 0): 2089 case IP_VERSION(10, 0, 1): 2090 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); 2091 break; 2092 case IP_VERSION(11, 0, 0): 2093 case IP_VERSION(11, 0, 2): 2094 case IP_VERSION(11, 0, 4): 2095 case IP_VERSION(11, 0, 5): 2096 case IP_VERSION(11, 0, 9): 2097 case IP_VERSION(11, 0, 7): 2098 case IP_VERSION(11, 0, 11): 2099 case IP_VERSION(11, 0, 12): 2100 case IP_VERSION(11, 0, 13): 2101 case IP_VERSION(11, 5, 0): 2102 case IP_VERSION(11, 5, 
2): 2103 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); 2104 break; 2105 case IP_VERSION(11, 0, 8): 2106 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block); 2107 break; 2108 case IP_VERSION(11, 0, 3): 2109 case IP_VERSION(12, 0, 1): 2110 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); 2111 break; 2112 case IP_VERSION(13, 0, 0): 2113 case IP_VERSION(13, 0, 1): 2114 case IP_VERSION(13, 0, 2): 2115 case IP_VERSION(13, 0, 3): 2116 case IP_VERSION(13, 0, 5): 2117 case IP_VERSION(13, 0, 6): 2118 case IP_VERSION(13, 0, 7): 2119 case IP_VERSION(13, 0, 8): 2120 case IP_VERSION(13, 0, 10): 2121 case IP_VERSION(13, 0, 11): 2122 case IP_VERSION(13, 0, 12): 2123 case IP_VERSION(13, 0, 14): 2124 case IP_VERSION(14, 0, 0): 2125 case IP_VERSION(14, 0, 1): 2126 case IP_VERSION(14, 0, 4): 2127 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); 2128 break; 2129 case IP_VERSION(13, 0, 4): 2130 amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block); 2131 break; 2132 case IP_VERSION(14, 0, 2): 2133 case IP_VERSION(14, 0, 3): 2134 case IP_VERSION(14, 0, 5): 2135 amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block); 2136 break; 2137 default: 2138 dev_err(adev->dev, 2139 "Failed to add psp ip block(MP0_HWIP:0x%x)\n", 2140 amdgpu_ip_version(adev, MP0_HWIP, 0)); 2141 return -EINVAL; 2142 } 2143 return 0; 2144 } 2145 2146 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) 2147 { 2148 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 2149 case IP_VERSION(9, 0, 0): 2150 case IP_VERSION(10, 0, 0): 2151 case IP_VERSION(10, 0, 1): 2152 case IP_VERSION(11, 0, 2): 2153 if (adev->asic_type == CHIP_ARCTURUS) 2154 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 2155 else 2156 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 2157 break; 2158 case IP_VERSION(11, 0, 0): 2159 case IP_VERSION(11, 0, 5): 2160 case IP_VERSION(11, 0, 9): 2161 case IP_VERSION(11, 0, 7): 2162 case IP_VERSION(11, 0, 11): 2163 case IP_VERSION(11, 0, 12): 2164 case IP_VERSION(11, 0, 13): 2165 case IP_VERSION(11, 5, 0): 2166 case IP_VERSION(11, 5, 2): 2167 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 2168 break; 2169 case IP_VERSION(11, 0, 8): 2170 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) 2171 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 2172 break; 2173 case IP_VERSION(12, 0, 0): 2174 case IP_VERSION(12, 0, 1): 2175 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); 2176 break; 2177 case IP_VERSION(13, 0, 0): 2178 case IP_VERSION(13, 0, 1): 2179 case IP_VERSION(13, 0, 2): 2180 case IP_VERSION(13, 0, 3): 2181 case IP_VERSION(13, 0, 4): 2182 case IP_VERSION(13, 0, 5): 2183 case IP_VERSION(13, 0, 6): 2184 case IP_VERSION(13, 0, 7): 2185 case IP_VERSION(13, 0, 8): 2186 case IP_VERSION(13, 0, 10): 2187 case IP_VERSION(13, 0, 11): 2188 case IP_VERSION(13, 0, 14): 2189 case IP_VERSION(13, 0, 12): 2190 amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); 2191 break; 2192 case IP_VERSION(14, 0, 0): 2193 case IP_VERSION(14, 0, 1): 2194 case IP_VERSION(14, 0, 2): 2195 case IP_VERSION(14, 0, 3): 2196 case IP_VERSION(14, 0, 4): 2197 case IP_VERSION(14, 0, 5): 2198 amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block); 2199 break; 2200 default: 2201 dev_err(adev->dev, 2202 "Failed to add smu ip block(MP1_HWIP:0x%x)\n", 2203 amdgpu_ip_version(adev, MP1_HWIP, 0)); 2204 return -EINVAL; 2205 } 2206 return 0; 2207 } 2208 2209 #if defined(CONFIG_DRM_AMD_DC) 2210 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev) 2211 { 2212 
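	/* On SR-IOV virtual functions, mark the device as using a virtual
	 * display and register the VKMS block in place of the full DM
	 * display block (see the caller in
	 * amdgpu_discovery_set_display_ip_blocks()).
	 */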
amdgpu_device_set_sriov_virtual_display(adev); 2213 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 2214 } 2215 #endif 2216 2217 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) 2218 { 2219 if (adev->enable_virtual_display) { 2220 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 2221 return 0; 2222 } 2223 2224 if (!amdgpu_device_has_dc_support(adev)) 2225 return 0; 2226 2227 #if defined(CONFIG_DRM_AMD_DC) 2228 if (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2229 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2230 case IP_VERSION(1, 0, 0): 2231 case IP_VERSION(1, 0, 1): 2232 case IP_VERSION(2, 0, 2): 2233 case IP_VERSION(2, 0, 0): 2234 case IP_VERSION(2, 0, 3): 2235 case IP_VERSION(2, 1, 0): 2236 case IP_VERSION(3, 0, 0): 2237 case IP_VERSION(3, 0, 2): 2238 case IP_VERSION(3, 0, 3): 2239 case IP_VERSION(3, 0, 1): 2240 case IP_VERSION(3, 1, 2): 2241 case IP_VERSION(3, 1, 3): 2242 case IP_VERSION(3, 1, 4): 2243 case IP_VERSION(3, 1, 5): 2244 case IP_VERSION(3, 1, 6): 2245 case IP_VERSION(3, 2, 0): 2246 case IP_VERSION(3, 2, 1): 2247 case IP_VERSION(3, 5, 0): 2248 case IP_VERSION(3, 5, 1): 2249 case IP_VERSION(3, 6, 0): 2250 case IP_VERSION(4, 1, 0): 2251 /* TODO: Fix IP version. DC code expects version 4.0.1 */ 2252 if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0)) 2253 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1); 2254 2255 if (amdgpu_sriov_vf(adev)) 2256 amdgpu_discovery_set_sriov_display(adev); 2257 else 2258 amdgpu_device_ip_block_add(adev, &dm_ip_block); 2259 break; 2260 default: 2261 dev_err(adev->dev, 2262 "Failed to add dm ip block(DCE_HWIP:0x%x)\n", 2263 amdgpu_ip_version(adev, DCE_HWIP, 0)); 2264 return -EINVAL; 2265 } 2266 } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) { 2267 switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) { 2268 case IP_VERSION(12, 0, 0): 2269 case IP_VERSION(12, 0, 1): 2270 case IP_VERSION(12, 1, 0): 2271 if (amdgpu_sriov_vf(adev)) 2272 amdgpu_discovery_set_sriov_display(adev); 2273 else 2274 amdgpu_device_ip_block_add(adev, &dm_ip_block); 2275 break; 2276 default: 2277 dev_err(adev->dev, 2278 "Failed to add dm ip block(DCI_HWIP:0x%x)\n", 2279 amdgpu_ip_version(adev, DCI_HWIP, 0)); 2280 return -EINVAL; 2281 } 2282 } 2283 #endif 2284 return 0; 2285 } 2286 2287 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) 2288 { 2289 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2290 case IP_VERSION(9, 0, 1): 2291 case IP_VERSION(9, 1, 0): 2292 case IP_VERSION(9, 2, 1): 2293 case IP_VERSION(9, 2, 2): 2294 case IP_VERSION(9, 3, 0): 2295 case IP_VERSION(9, 4, 0): 2296 case IP_VERSION(9, 4, 1): 2297 case IP_VERSION(9, 4, 2): 2298 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); 2299 break; 2300 case IP_VERSION(9, 4, 3): 2301 case IP_VERSION(9, 4, 4): 2302 case IP_VERSION(9, 5, 0): 2303 amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block); 2304 break; 2305 case IP_VERSION(10, 1, 10): 2306 case IP_VERSION(10, 1, 2): 2307 case IP_VERSION(10, 1, 1): 2308 case IP_VERSION(10, 1, 3): 2309 case IP_VERSION(10, 1, 4): 2310 case IP_VERSION(10, 3, 0): 2311 case IP_VERSION(10, 3, 2): 2312 case IP_VERSION(10, 3, 1): 2313 case IP_VERSION(10, 3, 4): 2314 case IP_VERSION(10, 3, 5): 2315 case IP_VERSION(10, 3, 6): 2316 case IP_VERSION(10, 3, 3): 2317 case IP_VERSION(10, 3, 7): 2318 amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); 2319 break; 2320 case IP_VERSION(11, 0, 0): 2321 case IP_VERSION(11, 0, 1): 2322 case IP_VERSION(11, 0, 2): 2323 case IP_VERSION(11, 0, 3): 2324 case IP_VERSION(11, 0, 4): 
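		/* the 11.5.x APU variants share the gfx_v11_0 block with the
		 * 11.0.x parts above
		 */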
2325 case IP_VERSION(11, 5, 0): 2326 case IP_VERSION(11, 5, 1): 2327 case IP_VERSION(11, 5, 2): 2328 case IP_VERSION(11, 5, 3): 2329 amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block); 2330 break; 2331 case IP_VERSION(12, 0, 0): 2332 case IP_VERSION(12, 0, 1): 2333 amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block); 2334 break; 2335 default: 2336 dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n", 2337 amdgpu_ip_version(adev, GC_HWIP, 0)); 2338 return -EINVAL; 2339 } 2340 return 0; 2341 } 2342 2343 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) 2344 { 2345 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 2346 case IP_VERSION(4, 0, 0): 2347 case IP_VERSION(4, 0, 1): 2348 case IP_VERSION(4, 1, 0): 2349 case IP_VERSION(4, 1, 1): 2350 case IP_VERSION(4, 1, 2): 2351 case IP_VERSION(4, 2, 0): 2352 case IP_VERSION(4, 2, 2): 2353 case IP_VERSION(4, 4, 0): 2354 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); 2355 break; 2356 case IP_VERSION(4, 4, 2): 2357 case IP_VERSION(4, 4, 5): 2358 case IP_VERSION(4, 4, 4): 2359 amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block); 2360 break; 2361 case IP_VERSION(5, 0, 0): 2362 case IP_VERSION(5, 0, 1): 2363 case IP_VERSION(5, 0, 2): 2364 case IP_VERSION(5, 0, 5): 2365 amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); 2366 break; 2367 case IP_VERSION(5, 2, 0): 2368 case IP_VERSION(5, 2, 2): 2369 case IP_VERSION(5, 2, 4): 2370 case IP_VERSION(5, 2, 5): 2371 case IP_VERSION(5, 2, 6): 2372 case IP_VERSION(5, 2, 3): 2373 case IP_VERSION(5, 2, 1): 2374 case IP_VERSION(5, 2, 7): 2375 amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); 2376 break; 2377 case IP_VERSION(6, 0, 0): 2378 case IP_VERSION(6, 0, 1): 2379 case IP_VERSION(6, 0, 2): 2380 case IP_VERSION(6, 0, 3): 2381 case IP_VERSION(6, 1, 0): 2382 case IP_VERSION(6, 1, 1): 2383 case IP_VERSION(6, 1, 2): 2384 case IP_VERSION(6, 1, 3): 2385 amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block); 2386 break; 2387 case IP_VERSION(7, 0, 0): 2388 case IP_VERSION(7, 0, 1): 2389 amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block); 2390 break; 2391 default: 2392 dev_err(adev->dev, 2393 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", 2394 amdgpu_ip_version(adev, SDMA0_HWIP, 0)); 2395 return -EINVAL; 2396 } 2397 2398 return 0; 2399 } 2400 2401 static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev) 2402 { 2403 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 2404 case IP_VERSION(13, 0, 6): 2405 case IP_VERSION(13, 0, 12): 2406 case IP_VERSION(13, 0, 14): 2407 amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block); 2408 break; 2409 default: 2410 break; 2411 } 2412 return 0; 2413 } 2414 2415 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) 2416 { 2417 if (amdgpu_ip_version(adev, VCE_HWIP, 0)) { 2418 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 2419 case IP_VERSION(7, 0, 0): 2420 case IP_VERSION(7, 2, 0): 2421 /* UVD is not supported on vega20 SR-IOV */ 2422 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 2423 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); 2424 break; 2425 default: 2426 dev_err(adev->dev, 2427 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", 2428 amdgpu_ip_version(adev, UVD_HWIP, 0)); 2429 return -EINVAL; 2430 } 2431 switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) { 2432 case IP_VERSION(4, 0, 0): 2433 case IP_VERSION(4, 1, 0): 2434 /* VCE is not supported on vega20 SR-IOV */ 2435 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 2436 
amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); 2437 break; 2438 default: 2439 dev_err(adev->dev, 2440 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", 2441 amdgpu_ip_version(adev, VCE_HWIP, 0)); 2442 return -EINVAL; 2443 } 2444 } else { 2445 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 2446 case IP_VERSION(1, 0, 0): 2447 case IP_VERSION(1, 0, 1): 2448 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); 2449 break; 2450 case IP_VERSION(2, 0, 0): 2451 case IP_VERSION(2, 0, 2): 2452 case IP_VERSION(2, 2, 0): 2453 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); 2454 if (!amdgpu_sriov_vf(adev)) 2455 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); 2456 break; 2457 case IP_VERSION(2, 0, 3): 2458 break; 2459 case IP_VERSION(2, 5, 0): 2460 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); 2461 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); 2462 break; 2463 case IP_VERSION(2, 6, 0): 2464 amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block); 2465 amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block); 2466 break; 2467 case IP_VERSION(3, 0, 0): 2468 case IP_VERSION(3, 0, 16): 2469 case IP_VERSION(3, 1, 1): 2470 case IP_VERSION(3, 1, 2): 2471 case IP_VERSION(3, 0, 2): 2472 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 2473 if (!amdgpu_sriov_vf(adev)) 2474 amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); 2475 break; 2476 case IP_VERSION(3, 0, 33): 2477 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 2478 break; 2479 case IP_VERSION(4, 0, 0): 2480 case IP_VERSION(4, 0, 2): 2481 case IP_VERSION(4, 0, 4): 2482 amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block); 2483 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block); 2484 break; 2485 case IP_VERSION(4, 0, 3): 2486 amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block); 2487 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block); 2488 break; 2489 case IP_VERSION(4, 0, 5): 2490 case IP_VERSION(4, 0, 6): 2491 amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block); 2492 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block); 2493 break; 2494 case IP_VERSION(5, 0, 0): 2495 amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block); 2496 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block); 2497 break; 2498 case IP_VERSION(5, 0, 1): 2499 amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block); 2500 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block); 2501 break; 2502 default: 2503 dev_err(adev->dev, 2504 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", 2505 amdgpu_ip_version(adev, UVD_HWIP, 0)); 2506 return -EINVAL; 2507 } 2508 } 2509 return 0; 2510 } 2511 2512 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) 2513 { 2514 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2515 case IP_VERSION(11, 0, 0): 2516 case IP_VERSION(11, 0, 1): 2517 case IP_VERSION(11, 0, 2): 2518 case IP_VERSION(11, 0, 3): 2519 case IP_VERSION(11, 0, 4): 2520 case IP_VERSION(11, 5, 0): 2521 case IP_VERSION(11, 5, 1): 2522 case IP_VERSION(11, 5, 2): 2523 case IP_VERSION(11, 5, 3): 2524 amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block); 2525 adev->enable_mes = true; 2526 adev->enable_mes_kiq = true; 2527 break; 2528 case IP_VERSION(12, 0, 0): 2529 case IP_VERSION(12, 0, 1): 2530 amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block); 2531 adev->enable_mes = true; 2532 adev->enable_mes_kiq = true; 2533 if (amdgpu_uni_mes) 2534 adev->enable_uni_mes = true; 2535 break; 2536 default: 2537 break; 2538 } 2539 return 0; 2540 } 2541 2542 static void amdgpu_discovery_init_soc_config(struct amdgpu_device 
*adev) 2543 { 2544 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2545 case IP_VERSION(9, 4, 3): 2546 case IP_VERSION(9, 4, 4): 2547 case IP_VERSION(9, 5, 0): 2548 aqua_vanjaram_init_soc_config(adev); 2549 break; 2550 default: 2551 break; 2552 } 2553 } 2554 2555 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev) 2556 { 2557 switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { 2558 case IP_VERSION(6, 1, 0): 2559 case IP_VERSION(6, 1, 1): 2560 case IP_VERSION(6, 1, 3): 2561 amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block); 2562 break; 2563 default: 2564 break; 2565 } 2566 2567 return 0; 2568 } 2569 2570 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev) 2571 { 2572 switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { 2573 case IP_VERSION(4, 0, 5): 2574 case IP_VERSION(4, 0, 6): 2575 if (amdgpu_umsch_mm & 0x1) { 2576 amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block); 2577 adev->enable_umsch_mm = true; 2578 } 2579 break; 2580 default: 2581 break; 2582 } 2583 2584 return 0; 2585 } 2586 2587 static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev) 2588 { 2589 #if defined(CONFIG_DRM_AMD_ISP) 2590 switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) { 2591 case IP_VERSION(4, 1, 0): 2592 amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block); 2593 break; 2594 case IP_VERSION(4, 1, 1): 2595 amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block); 2596 break; 2597 default: 2598 break; 2599 } 2600 #endif 2601 2602 return 0; 2603 } 2604 2605 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) 2606 { 2607 int r; 2608 2609 switch (adev->asic_type) { 2610 case CHIP_VEGA10: 2611 /* This is not fatal. We only need the discovery 2612 * binary for sysfs. We don't need it for a 2613 * functional system. 2614 */ 2615 amdgpu_discovery_init(adev); 2616 vega10_reg_base_init(adev); 2617 adev->sdma.num_instances = 2; 2618 adev->sdma.sdma_mask = 3; 2619 adev->gmc.num_umc = 4; 2620 adev->gfx.xcc_mask = 1; 2621 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0); 2622 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0); 2623 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); 2624 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0); 2625 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0); 2626 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0); 2627 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); 2628 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0); 2629 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0); 2630 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); 2631 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); 2632 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); 2633 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0); 2634 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1); 2635 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); 2636 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); 2637 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0); 2638 break; 2639 case CHIP_VEGA12: 2640 /* This is not fatal. We only need the discovery 2641 * binary for sysfs. We don't need it for a 2642 * functional system. 
2643 */ 2644 amdgpu_discovery_init(adev); 2645 vega10_reg_base_init(adev); 2646 adev->sdma.num_instances = 2; 2647 adev->sdma.sdma_mask = 3; 2648 adev->gmc.num_umc = 4; 2649 adev->gfx.xcc_mask = 1; 2650 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0); 2651 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0); 2652 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); 2653 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1); 2654 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1); 2655 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1); 2656 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0); 2657 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0); 2658 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0); 2659 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); 2660 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); 2661 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); 2662 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1); 2663 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1); 2664 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); 2665 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); 2666 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1); 2667 break; 2668 case CHIP_RAVEN: 2669 /* This is not fatal. We only need the discovery 2670 * binary for sysfs. We don't need it for a 2671 * functional system. 2672 */ 2673 amdgpu_discovery_init(adev); 2674 vega10_reg_base_init(adev); 2675 adev->sdma.num_instances = 1; 2676 adev->sdma.sdma_mask = 1; 2677 adev->vcn.num_vcn_inst = 1; 2678 adev->gmc.num_umc = 2; 2679 adev->gfx.xcc_mask = 1; 2680 if (adev->apu_flags & AMD_APU_IS_RAVEN2) { 2681 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0); 2682 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); 2683 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1); 2684 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1); 2685 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1); 2686 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1); 2687 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1); 2688 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0); 2689 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1); 2690 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1); 2691 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0); 2692 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1); 2693 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2); 2694 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1); 2695 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1); 2696 adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0); 2697 } else { 2698 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0); 2699 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0); 2700 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0); 2701 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0); 2702 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0); 2703 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); 2704 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0); 2705 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0); 2706 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0); 2707 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0); 2708 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0); 2709 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0); 2710 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0); 2711 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0); 2712 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0); 2713 
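			/* ISP v2.0 is common to both Raven revisions */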
adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0); 2714 } 2715 break; 2716 case CHIP_VEGA20: 2717 /* This is not fatal. We only need the discovery 2718 * binary for sysfs. We don't need it for a 2719 * functional system. 2720 */ 2721 amdgpu_discovery_init(adev); 2722 vega20_reg_base_init(adev); 2723 adev->sdma.num_instances = 2; 2724 adev->sdma.sdma_mask = 3; 2725 adev->gmc.num_umc = 8; 2726 adev->gfx.xcc_mask = 1; 2727 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0); 2728 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0); 2729 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); 2730 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0); 2731 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0); 2732 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0); 2733 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0); 2734 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0); 2735 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1); 2736 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2); 2737 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2738 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2); 2739 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2); 2740 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0); 2741 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0); 2742 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0); 2743 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0); 2744 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0); 2745 break; 2746 case CHIP_ARCTURUS: 2747 /* This is not fatal. We only need the discovery 2748 * binary for sysfs. We don't need it for a 2749 * functional system. 2750 */ 2751 amdgpu_discovery_init(adev); 2752 arct_reg_base_init(adev); 2753 adev->sdma.num_instances = 8; 2754 adev->sdma.sdma_mask = 0xff; 2755 adev->vcn.num_vcn_inst = 2; 2756 adev->gmc.num_umc = 8; 2757 adev->gfx.xcc_mask = 1; 2758 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1); 2759 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1); 2760 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); 2761 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1); 2762 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2); 2763 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2); 2764 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2); 2765 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2); 2766 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2); 2767 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2); 2768 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2); 2769 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2); 2770 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1); 2771 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1); 2772 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2); 2773 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4); 2774 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2775 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3); 2776 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3); 2777 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1); 2778 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0); 2779 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0); 2780 break; 2781 case CHIP_ALDEBARAN: 2782 /* This is not fatal. We only need the discovery 2783 * binary for sysfs. We don't need it for a 2784 * functional system. 
2785 */ 2786 amdgpu_discovery_init(adev); 2787 aldebaran_reg_base_init(adev); 2788 adev->sdma.num_instances = 5; 2789 adev->sdma.sdma_mask = 0x1f; 2790 adev->vcn.num_vcn_inst = 2; 2791 adev->gmc.num_umc = 4; 2792 adev->gfx.xcc_mask = 1; 2793 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2794 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2795 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); 2796 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0); 2797 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0); 2798 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0); 2799 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0); 2800 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0); 2801 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0); 2802 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2); 2803 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4); 2804 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0); 2805 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2); 2806 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2); 2807 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2); 2808 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2); 2809 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2); 2810 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0); 2811 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0); 2812 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); 2813 break; 2814 case CHIP_CYAN_SKILLFISH: 2815 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { 2816 r = amdgpu_discovery_reg_base_init(adev); 2817 if (r) 2818 return -EINVAL; 2819 2820 amdgpu_discovery_harvest_ip(adev); 2821 amdgpu_discovery_get_gfx_info(adev); 2822 amdgpu_discovery_get_mall_info(adev); 2823 amdgpu_discovery_get_vcn_info(adev); 2824 } else { 2825 cyan_skillfish_reg_base_init(adev); 2826 adev->sdma.num_instances = 2; 2827 adev->sdma.sdma_mask = 3; 2828 adev->gfx.xcc_mask = 1; 2829 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3); 2830 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3); 2831 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1); 2832 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1); 2833 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1); 2834 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1); 2835 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0); 2836 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1); 2837 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1); 2838 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8); 2839 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8); 2840 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1); 2841 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8); 2842 adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3); 2843 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3); 2844 } 2845 break; 2846 default: 2847 r = amdgpu_discovery_reg_base_init(adev); 2848 if (r) { 2849 drm_err(&adev->ddev, "discovery failed: %d\n", r); 2850 return r; 2851 } 2852 2853 amdgpu_discovery_harvest_ip(adev); 2854 amdgpu_discovery_get_gfx_info(adev); 2855 amdgpu_discovery_get_mall_info(adev); 2856 amdgpu_discovery_get_vcn_info(adev); 2857 break; 2858 } 2859 2860 amdgpu_discovery_init_soc_config(adev); 2861 amdgpu_discovery_sysfs_init(adev); 2862 2863 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2864 case IP_VERSION(9, 0, 1): 2865 case IP_VERSION(9, 2, 1): 2866 case IP_VERSION(9, 4, 0): 2867 case IP_VERSION(9, 4, 1): 2868 case IP_VERSION(9, 4, 2): 2869 case IP_VERSION(9, 4, 3): 2870 case 
IP_VERSION(9, 4, 4): 2871 case IP_VERSION(9, 5, 0): 2872 adev->family = AMDGPU_FAMILY_AI; 2873 break; 2874 case IP_VERSION(9, 1, 0): 2875 case IP_VERSION(9, 2, 2): 2876 case IP_VERSION(9, 3, 0): 2877 adev->family = AMDGPU_FAMILY_RV; 2878 break; 2879 case IP_VERSION(10, 1, 10): 2880 case IP_VERSION(10, 1, 1): 2881 case IP_VERSION(10, 1, 2): 2882 case IP_VERSION(10, 1, 3): 2883 case IP_VERSION(10, 1, 4): 2884 case IP_VERSION(10, 3, 0): 2885 case IP_VERSION(10, 3, 2): 2886 case IP_VERSION(10, 3, 4): 2887 case IP_VERSION(10, 3, 5): 2888 adev->family = AMDGPU_FAMILY_NV; 2889 break; 2890 case IP_VERSION(10, 3, 1): 2891 adev->family = AMDGPU_FAMILY_VGH; 2892 adev->apu_flags |= AMD_APU_IS_VANGOGH; 2893 break; 2894 case IP_VERSION(10, 3, 3): 2895 adev->family = AMDGPU_FAMILY_YC; 2896 break; 2897 case IP_VERSION(10, 3, 6): 2898 adev->family = AMDGPU_FAMILY_GC_10_3_6; 2899 break; 2900 case IP_VERSION(10, 3, 7): 2901 adev->family = AMDGPU_FAMILY_GC_10_3_7; 2902 break; 2903 case IP_VERSION(11, 0, 0): 2904 case IP_VERSION(11, 0, 2): 2905 case IP_VERSION(11, 0, 3): 2906 adev->family = AMDGPU_FAMILY_GC_11_0_0; 2907 break; 2908 case IP_VERSION(11, 0, 1): 2909 case IP_VERSION(11, 0, 4): 2910 adev->family = AMDGPU_FAMILY_GC_11_0_1; 2911 break; 2912 case IP_VERSION(11, 5, 0): 2913 case IP_VERSION(11, 5, 1): 2914 case IP_VERSION(11, 5, 2): 2915 case IP_VERSION(11, 5, 3): 2916 adev->family = AMDGPU_FAMILY_GC_11_5_0; 2917 break; 2918 case IP_VERSION(12, 0, 0): 2919 case IP_VERSION(12, 0, 1): 2920 adev->family = AMDGPU_FAMILY_GC_12_0_0; 2921 break; 2922 default: 2923 return -EINVAL; 2924 } 2925 2926 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2927 case IP_VERSION(9, 1, 0): 2928 case IP_VERSION(9, 2, 2): 2929 case IP_VERSION(9, 3, 0): 2930 case IP_VERSION(10, 1, 3): 2931 case IP_VERSION(10, 1, 4): 2932 case IP_VERSION(10, 3, 1): 2933 case IP_VERSION(10, 3, 3): 2934 case IP_VERSION(10, 3, 6): 2935 case IP_VERSION(10, 3, 7): 2936 case IP_VERSION(11, 0, 1): 2937 case IP_VERSION(11, 0, 4): 2938 case IP_VERSION(11, 5, 0): 2939 case IP_VERSION(11, 5, 1): 2940 case IP_VERSION(11, 5, 2): 2941 case IP_VERSION(11, 5, 3): 2942 adev->flags |= AMD_IS_APU; 2943 break; 2944 default: 2945 break; 2946 } 2947 2948 /* set NBIO version */ 2949 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 2950 case IP_VERSION(6, 1, 0): 2951 case IP_VERSION(6, 2, 0): 2952 adev->nbio.funcs = &nbio_v6_1_funcs; 2953 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; 2954 break; 2955 case IP_VERSION(7, 0, 0): 2956 case IP_VERSION(7, 0, 1): 2957 case IP_VERSION(2, 5, 0): 2958 adev->nbio.funcs = &nbio_v7_0_funcs; 2959 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; 2960 break; 2961 case IP_VERSION(7, 4, 0): 2962 case IP_VERSION(7, 4, 1): 2963 case IP_VERSION(7, 4, 4): 2964 adev->nbio.funcs = &nbio_v7_4_funcs; 2965 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; 2966 break; 2967 case IP_VERSION(7, 9, 0): 2968 case IP_VERSION(7, 9, 1): 2969 adev->nbio.funcs = &nbio_v7_9_funcs; 2970 adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg; 2971 break; 2972 case IP_VERSION(7, 11, 0): 2973 case IP_VERSION(7, 11, 1): 2974 case IP_VERSION(7, 11, 2): 2975 case IP_VERSION(7, 11, 3): 2976 adev->nbio.funcs = &nbio_v7_11_funcs; 2977 adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg; 2978 break; 2979 case IP_VERSION(7, 2, 0): 2980 case IP_VERSION(7, 2, 1): 2981 case IP_VERSION(7, 3, 0): 2982 case IP_VERSION(7, 5, 0): 2983 case IP_VERSION(7, 5, 1): 2984 adev->nbio.funcs = &nbio_v7_2_funcs; 2985 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg; 2986 
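		/* NBIO v7.3 and v7.5 parts reuse the v7.2 callbacks */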
break; 2987 case IP_VERSION(2, 1, 1): 2988 case IP_VERSION(2, 3, 0): 2989 case IP_VERSION(2, 3, 1): 2990 case IP_VERSION(2, 3, 2): 2991 case IP_VERSION(3, 3, 0): 2992 case IP_VERSION(3, 3, 1): 2993 case IP_VERSION(3, 3, 2): 2994 case IP_VERSION(3, 3, 3): 2995 adev->nbio.funcs = &nbio_v2_3_funcs; 2996 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; 2997 break; 2998 case IP_VERSION(4, 3, 0): 2999 case IP_VERSION(4, 3, 1): 3000 if (amdgpu_sriov_vf(adev)) 3001 adev->nbio.funcs = &nbio_v4_3_sriov_funcs; 3002 else 3003 adev->nbio.funcs = &nbio_v4_3_funcs; 3004 adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg; 3005 break; 3006 case IP_VERSION(7, 7, 0): 3007 case IP_VERSION(7, 7, 1): 3008 adev->nbio.funcs = &nbio_v7_7_funcs; 3009 adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg; 3010 break; 3011 case IP_VERSION(6, 3, 1): 3012 adev->nbio.funcs = &nbif_v6_3_1_funcs; 3013 adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg; 3014 break; 3015 default: 3016 break; 3017 } 3018 3019 switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) { 3020 case IP_VERSION(4, 0, 0): 3021 case IP_VERSION(4, 0, 1): 3022 case IP_VERSION(4, 1, 0): 3023 case IP_VERSION(4, 1, 1): 3024 case IP_VERSION(4, 1, 2): 3025 case IP_VERSION(4, 2, 0): 3026 case IP_VERSION(4, 2, 1): 3027 case IP_VERSION(4, 4, 0): 3028 case IP_VERSION(4, 4, 2): 3029 case IP_VERSION(4, 4, 5): 3030 adev->hdp.funcs = &hdp_v4_0_funcs; 3031 break; 3032 case IP_VERSION(5, 0, 0): 3033 case IP_VERSION(5, 0, 1): 3034 case IP_VERSION(5, 0, 2): 3035 case IP_VERSION(5, 0, 3): 3036 case IP_VERSION(5, 0, 4): 3037 case IP_VERSION(5, 2, 0): 3038 adev->hdp.funcs = &hdp_v5_0_funcs; 3039 break; 3040 case IP_VERSION(5, 2, 1): 3041 adev->hdp.funcs = &hdp_v5_2_funcs; 3042 break; 3043 case IP_VERSION(6, 0, 0): 3044 case IP_VERSION(6, 0, 1): 3045 case IP_VERSION(6, 1, 0): 3046 adev->hdp.funcs = &hdp_v6_0_funcs; 3047 break; 3048 case IP_VERSION(7, 0, 0): 3049 adev->hdp.funcs = &hdp_v7_0_funcs; 3050 break; 3051 default: 3052 break; 3053 } 3054 3055 switch (amdgpu_ip_version(adev, DF_HWIP, 0)) { 3056 case IP_VERSION(3, 6, 0): 3057 case IP_VERSION(3, 6, 1): 3058 case IP_VERSION(3, 6, 2): 3059 adev->df.funcs = &df_v3_6_funcs; 3060 break; 3061 case IP_VERSION(2, 1, 0): 3062 case IP_VERSION(2, 1, 1): 3063 case IP_VERSION(2, 5, 0): 3064 case IP_VERSION(3, 5, 1): 3065 case IP_VERSION(3, 5, 2): 3066 adev->df.funcs = &df_v1_7_funcs; 3067 break; 3068 case IP_VERSION(4, 3, 0): 3069 adev->df.funcs = &df_v4_3_funcs; 3070 break; 3071 case IP_VERSION(4, 6, 2): 3072 adev->df.funcs = &df_v4_6_2_funcs; 3073 break; 3074 case IP_VERSION(4, 15, 0): 3075 case IP_VERSION(4, 15, 1): 3076 adev->df.funcs = &df_v4_15_funcs; 3077 break; 3078 default: 3079 break; 3080 } 3081 3082 switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) { 3083 case IP_VERSION(9, 0, 0): 3084 case IP_VERSION(9, 0, 1): 3085 case IP_VERSION(10, 0, 0): 3086 case IP_VERSION(10, 0, 1): 3087 case IP_VERSION(10, 0, 2): 3088 adev->smuio.funcs = &smuio_v9_0_funcs; 3089 break; 3090 case IP_VERSION(11, 0, 0): 3091 case IP_VERSION(11, 0, 2): 3092 case IP_VERSION(11, 0, 3): 3093 case IP_VERSION(11, 0, 4): 3094 case IP_VERSION(11, 0, 7): 3095 case IP_VERSION(11, 0, 8): 3096 adev->smuio.funcs = &smuio_v11_0_funcs; 3097 break; 3098 case IP_VERSION(11, 0, 6): 3099 case IP_VERSION(11, 0, 10): 3100 case IP_VERSION(11, 0, 11): 3101 case IP_VERSION(11, 5, 0): 3102 case IP_VERSION(11, 5, 2): 3103 case IP_VERSION(13, 0, 1): 3104 case IP_VERSION(13, 0, 9): 3105 case IP_VERSION(13, 0, 10): 3106 adev->smuio.funcs = &smuio_v11_0_6_funcs; 3107 break; 3108 case 
IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 11):
		adev->smuio.funcs = &smuio_v13_0_3_funcs;
		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
			adev->flags |= AMD_IS_APU;
		}
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	case IP_VERSION(14, 0, 2):
		adev->smuio.funcs = &smuio_v14_0_2_funcs;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
		adev->lsdma.funcs = &lsdma_v7_0_funcs;
		break;
	default:
		break;
	}

	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_ras_ip_blocks(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	     !amdgpu_sriov_vf(adev)) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO &&
	     amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_isp_ip_blocks(adev);
	if (r)
		return r;
	return 0;
}
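
/*
 * Overview of the fixed registration order used above: common, gmc,
 * ih/psp (PSP is added before IH on SR-IOV), smu (when firmware is loaded
 * through PSP), display, gfx, sdma, ras, smu (direct or RLC-backdoor
 * loading), mm (uvd/vce or vcn/jpeg), mes, vpe, umsch_mm and isp.
 * amdgpu_discovery_set_ip_blocks() stops at the first helper that fails
 * and propagates its error code.
 *
 * Illustrative sketch only (hypothetical helpers, not part of this file):
 * code elsewhere in the driver branches on the discovered versions the
 * same way these switches do, e.g.
 *
 *	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0))
 *		use_gfx11_path(adev);
 *	else
 *		use_legacy_path(adev);
 */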