/*
 * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
#include "amdgpu_ras.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_3.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "df_v4_6_2.h"
#include "df_v4_15.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "nbio_v7_9.h"
#include "nbio_v7_11.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gmc_v12_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "nbif_v6_3_1.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "hdp_v7_0.h"
#include "nv.h"
#include "soc21.h"
#include "soc24.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
#include "ih_v7_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "gfx_v12_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "sdma_v7_0.h"
#include "lsdma_v6_0.h"
#include "lsdma_v7_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "vcn_v4_0_3.h"
#include "jpeg_v4_0_3.h"
#include "vcn_v4_0_5.h"
#include "jpeg_v4_0_5.h"
#include "amdgpu_vkms.h"
#include "mes_v11_0.h"
#include "mes_v12_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
#include "smuio_v14_0_2.h"
#include "vcn_v5_0_0.h"
#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
#include "jpeg_v5_0_1.h"
#include "amdgpu_ras_mgr.h"

#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif
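/*
 * Overview: most modern AMD GPUs carry a firmware-generated "IP discovery"
 * binary (in a reserved region at the top of VRAM, in system memory, or
 * side-loaded from a firmware file) that enumerates every IP block on each
 * die along with its version, instance number and register base addresses.
 * This file locates and validates that binary, fills in adev->reg_offset[]
 * and adev->ip_versions[], applies the harvest configuration, selects the
 * matching IP block implementations, and exposes the parsed tables under
 * sysfs at <device>/ip_discovery/.
 */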
MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");

#define mmIP_DISCOVERY_VERSION	0x16A00
#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMP0_SMN_C2PMSG_33	0x16061
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID]		= "MP1",
	[MP2_HWID]		= "MP2",
	[THM_HWID]		= "THM",
	[SMUIO_HWID]		= "SMUIO",
	[FUSE_HWID]		= "FUSE",
	[CLKA_HWID]		= "CLKA",
	[PWR_HWID]		= "PWR",
	[GC_HWID]		= "GC",
	[UVD_HWID]		= "UVD",
	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
	[ACP_HWID]		= "ACP",
	[DCI_HWID]		= "DCI",
	[DMU_HWID]		= "DMU",
	[DCO_HWID]		= "DCO",
	[DIO_HWID]		= "DIO",
	[XDMA_HWID]		= "XDMA",
	[DCEAZ_HWID]		= "DCEAZ",
	[DAZ_HWID]		= "DAZ",
	[SDPMUX_HWID]		= "SDPMUX",
	[NTB_HWID]		= "NTB",
	[IOHC_HWID]		= "IOHC",
	[L2IMU_HWID]		= "L2IMU",
	[VCE_HWID]		= "VCE",
	[MMHUB_HWID]		= "MMHUB",
	[ATHUB_HWID]		= "ATHUB",
	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
	[DFX_HWID]		= "DFX",
	[DBGU0_HWID]		= "DBGU0",
	[DBGU1_HWID]		= "DBGU1",
	[OSSSYS_HWID]		= "OSSSYS",
	[HDP_HWID]		= "HDP",
	[SDMA0_HWID]		= "SDMA0",
	[SDMA1_HWID]		= "SDMA1",
	[SDMA2_HWID]		= "SDMA2",
	[SDMA3_HWID]		= "SDMA3",
	[LSDMA_HWID]		= "LSDMA",
	[ISP_HWID]		= "ISP",
	[DBGU_IO_HWID]		= "DBGU_IO",
	[DF_HWID]		= "DF",
	[CLKB_HWID]		= "CLKB",
	[FCH_HWID]		= "FCH",
	[DFX_DAP_HWID]		= "DFX_DAP",
	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
	[L1IMU3_HWID]		= "L1IMU3",
	[L1IMU4_HWID]		= "L1IMU4",
	[L1IMU5_HWID]		= "L1IMU5",
	[L1IMU6_HWID]		= "L1IMU6",
	[L1IMU7_HWID]		= "L1IMU7",
	[L1IMU8_HWID]		= "L1IMU8",
	[L1IMU9_HWID]		= "L1IMU9",
	[L1IMU10_HWID]		= "L1IMU10",
	[L1IMU11_HWID]		= "L1IMU11",
	[L1IMU12_HWID]		= "L1IMU12",
	[L1IMU13_HWID]		= "L1IMU13",
	[L1IMU14_HWID]		= "L1IMU14",
	[L1IMU15_HWID]		= "L1IMU15",
	[WAFLC_HWID]		= "WAFLC",
	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
	[PCIE_HWID]		= "PCIE",
	[PCS_HWID]		= "PCS",
	[DDCL_HWID]		= "DDCL",
	[SST_HWID]		= "SST",
	[IOAGR_HWID]		= "IOAGR",
	[NBIF_HWID]		= "NBIF",
	[IOAPIC_HWID]		= "IOAPIC",
	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
	[NTBCCP_HWID]		= "NTBCCP",
	[UMC_HWID]		= "UMC",
	[SATA_HWID]		= "SATA",
	[USB_HWID]		= "USB",
	[CCXSEC_HWID]		= "CCXSEC",
	[XGMI_HWID]		= "XGMI",
	[XGBE_HWID]		= "XGBE",
	[MP0_HWID]		= "MP0",
	[VPE_HWID]		= "VPE",
};

static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP]	= GC_HWID,
	[HDP_HWIP]	= HDP_HWID,
	[SDMA0_HWIP]	= SDMA0_HWID,
	[SDMA1_HWIP]	= SDMA1_HWID,
	[SDMA2_HWIP]	= SDMA2_HWID,
	[SDMA3_HWIP]	= SDMA3_HWID,
	[LSDMA_HWIP]	= LSDMA_HWID,
	[MMHUB_HWIP]	= MMHUB_HWID,
	[ATHUB_HWIP]	= ATHUB_HWID,
	[NBIO_HWIP]	= NBIF_HWID,
	[MP0_HWIP]	= MP0_HWID,
	[MP1_HWIP]	= MP1_HWID,
	[UVD_HWIP]	= UVD_HWID,
	[VCE_HWIP]	= VCE_HWID,
	[DF_HWIP]	= DF_HWID,
	[DCE_HWIP]	= DMU_HWID,
	[OSSSYS_HWIP]	= OSSSYS_HWID,
	[SMUIO_HWIP]	= SMUIO_HWID,
	[PWR_HWIP]	= PWR_HWID,
	[NBIF_HWIP]	= NBIF_HWID,
	[THM_HWIP]	= THM_HWID,
	[CLK_HWIP]	= CLKA_HWID,
	[UMC_HWIP]	= UMC_HWID,
	[XGMI_HWIP]	= XGMI_HWID,
	[DCI_HWIP]	= DCI_HWID,
	[PCIE_HWIP]	= PCIE_HWID,
	[VPE_HWIP]	= VPE_HWID,
	[ISP_HWIP]	= ISP_HWID,
};
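/*
 * Note: HWIP is the driver's own enumeration of IP blocks, while HWID is
 * the identifier used inside the discovery binary; hw_id_map above
 * translates from the former to the latter when register bases and IP
 * versions are assigned in amdgpu_discovery_reg_base_init().
 */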
static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
{
	u64 tmr_offset, tmr_size, pos;
	void *discv_regn;
	int ret;

	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
	if (ret)
		return ret;

	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;

	/* This region is read-only and reserved from system use */
	discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC);
	if (discv_regn) {
		memcpy(binary, discv_regn, adev->discovery.size);
		memunmap(discv_regn);
		return 0;
	}

	return -ENOENT;
}

#define IP_DISCOVERY_V2		2
#define IP_DISCOVERY_V4		4
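/*
 * Read the discovery binary out of the reserved TMR at the top of VRAM.
 * On bare metal we may first have to wait for IFWI init to finish (the
 * C2PMSG_33 handshake below); if the reported VRAM size is unusable, fall
 * back to the ACPI-described copy in system memory.
 */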
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
{
	bool sz_valid = true;
	uint64_t vram_size;
	int i, ret = 0;
	u32 msg;

	if (!amdgpu_sriov_vf(adev)) {
		/* It can take up to two seconds for IFWI init to complete on some dGPUs,
		 * but generally it should be in the 60-100ms range. Normally this starts
		 * as soon as the device gets power so by the time the OS loads this has long
		 * completed. However, when a card is hotplugged via e.g., USB4, we need to
		 * wait for this to complete. Once the C2PMSG is updated, we can
		 * continue.
		 */

		for (i = 0; i < 2000; i++) {
			msg = RREG32(mmMP0_SMN_C2PMSG_33);
			if (msg & 0x80000000)
				break;
			msleep(1);
		}
	}

	vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
	if (!vram_size || vram_size == U32_MAX)
		sz_valid = false;
	else
		vram_size <<= 20;

	/*
	 * If in VRAM, the discovery TMR is marked for reservation. If it is in
	 * system memory, it is not required to be reserved.
	 */
	if (sz_valid) {
		if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
			/* For SRIOV VFs with dynamic critical region enabled,
			 * we will get the IPD binary via the call below.
			 * If dynamic critical is disabled, fall through to the
			 * normal sequence.
			 */
			if (amdgpu_virt_get_dynamic_data_info(adev,
					AMD_SRIOV_MSG_IPD_TABLE_ID, binary,
					&adev->discovery.size)) {
				dev_err(adev->dev,
					"failed to read discovery info from dynamic critical region.");
				ret = -EINVAL;
				goto exit;
			}
		} else {
			uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

			amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
						  adev->discovery.size, false);
			adev->discovery.reserve_tmr = true;
		}
	} else {
		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
	}

	if (ret)
		dev_err(adev->dev,
			"failed to read discovery info from memory, vram size read: %llx",
			vram_size);
exit:
	return ret;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
						  uint8_t *binary,
						  const char *fw_name)
{
	const struct firmware *fw;
	int r;

	r = firmware_request_nowarn(&fw, fw_name, adev->dev);
	if (r) {
		if (amdgpu_discovery == 2)
			dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
		else
			drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
	release_firmware(fw);

	return 0;
}

static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;
	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}
static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table for the VCN config.
	 */
	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
					   struct binary_header *bhdr)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct table_info *info;
	uint16_t checksum;
	uint16_t offset;

	info = &bhdr->table_list[NPS_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	struct nps_info_header *nhdr =
		(struct nps_info_header *)(discovery_bin + offset);

	if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
		dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
		return -EINVAL;
	}

	if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
					      le32_to_cpu(nhdr->size_bytes),
					      checksum)) {
		dev_dbg(adev->dev, "invalid nps info data table checksum\n");
		return -EINVAL;
	}

	return 0;
}

static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
{
	if (amdgpu_discovery == 2) {
		/* Assume there is valid discovery TMR in VRAM even if binary is sideloaded */
		adev->discovery.reserve_tmr = true;
		return "amdgpu/ip_discovery.bin";
	}

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		return "amdgpu/vega10_ip_discovery.bin";
	case CHIP_VEGA12:
		return "amdgpu/vega12_ip_discovery.bin";
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			return "amdgpu/raven2_ip_discovery.bin";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			return "amdgpu/picasso_ip_discovery.bin";
		else
			return "amdgpu/raven_ip_discovery.bin";
	case CHIP_VEGA20:
		return "amdgpu/vega20_ip_discovery.bin";
	case CHIP_ARCTURUS:
		return "amdgpu/arcturus_ip_discovery.bin";
	case CHIP_ALDEBARAN:
		return "amdgpu/aldebaran_ip_discovery.bin";
	default:
		return NULL;
	}
}
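/*
 * Top-level init: allocate a DISCOVERY_TMR_SIZE buffer, populate it from a
 * firmware file (if one is preferred or required for the ASIC) or from the
 * TMR in memory, then validate the binary signature and the checksum of
 * every table that will be consumed (IP discovery, GC, harvest, VCN, MALL).
 */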
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint8_t *discovery_bin;
	const char *fw_name;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
	if (!adev->discovery.bin)
		return -ENOMEM;
	adev->discovery.size = DISCOVERY_TMR_SIZE;
	adev->discovery.debugfs_blob.data = adev->discovery.bin;
	adev->discovery.debugfs_blob.size = adev->discovery.size;

	discovery_bin = adev->discovery.bin;
	/* Read from file if it is the preferred option */
	fw_name = amdgpu_discovery_get_fw_name(adev);
	if (fw_name != NULL) {
		drm_dbg(&adev->ddev, "use ip discovery information from file");
		r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin,
							   fw_name);
		if (r)
			goto out;
	} else {
		drm_dbg(&adev->ddev, "use ip discovery information from memory");
		r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin);
		if (r)
			goto out;
	}

	/* check the ip discovery binary signature */
	if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) {
		dev_err(adev->dev,
			"invalid ip discovery binary signature\n");
		r = -EINVAL;
		goto out;
	}

	bhdr = (struct binary_header *)discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size,
					      checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(discovery_bin + offset);
		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
						      le16_to_cpu(ihdr->size),
						      checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
						      le32_to_cpu(ghdr->size),
						      checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(
			    discovery_bin + offset,
			    sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(
			    discovery_bin + offset,
			    le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(
			    discovery_bin + offset,
			    le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->discovery.bin);
	adev->discovery.bin = NULL;
	if ((amdgpu_discovery != 2) &&
	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
		amdgpu_ras_query_boot_status(adev, 4);
	return r;
}
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->discovery.bin);
	adev->discovery.bin = NULL;
}

static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
					uint8_t instance, uint16_t hw_id)
{
	if (instance >= HWIP_MAX_INSTANCE) {
		dev_err(adev->dev,
			"Unexpected instance_number (%d) from ip discovery blob\n",
			instance);
		return -EINVAL;
	}
	if (hw_id >= HW_ID_MAX) {
		dev_err(adev->dev,
			"Unexpected hw_id (%d) from ip discovery blob\n",
			hw_id);
		return -EINVAL;
	}

	return 0;
}

static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	uint16_t hw_id;
	uint8_t inst;
	int i, j;

	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header *)(discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(discovery_bin + ip_offset);
			inst = ip->number_instance;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
				goto next_ip;

			if (ip->harvest == 1) {
				switch (hw_id) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (inst == 0) {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
					} else {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
					}
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			ip_offset += struct_size(ip, base_address,
						 ip->num_base_address);
		}
	}
}
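/*
 * Harvesting comes in two flavours: older (v2 and earlier) discovery
 * binaries carry a per-IP harvest bit, read by the helper above, while
 * newer binaries provide a dedicated harvest table, parsed below.
 * amdgpu_discovery_harvest_ip() decides which one applies.
 */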
static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;
	uint32_t umc_harvest_config = 0;

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			adev->vcn.harvest_config |=
				(1 << harvest_info->list[i].number_instance);
			adev->jpeg.harvest_config |=
				(1 << harvest_info->list[i].number_instance);

			adev->vcn.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			adev->jpeg.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			umc_harvest_config |=
				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
			(*umc_harvest_count)++;
			break;
		case GC_HWID:
			adev->gfx.xcc_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case SDMA0_HWID:
			adev->sdma.sdma_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
#if defined(CONFIG_DRM_AMD_ISP)
		case ISP_HWID:
			adev->isp.harvest_config |=
				~(1U << harvest_info->list[i].number_instance);
			break;
#endif
		default:
			break;
		}
	}

	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
				~umc_harvest_config;
}

/* ================================================== */

struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8 num_instance;
	u8 major, minor, revision;
	u8 harvest;

	int num_base_addresses;
	u32 base_addr[] __counted_by(num_base_addresses);
};

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;  /* ip_discovery/die/#die/, contains ip_hw_id */
	u16 num_ips;
};
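/*
 * The structs above surface under sysfs as, e.g. (path illustrative):
 *   <device>/ip_discovery/die/0/GC/0/{hw_id,major,minor,revision,...}
 * with a named symlink (GC, SDMA0, ...) created next to each numeric
 * hw_id directory when the HW ID has a known name.
 */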
/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that at + size <= PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static const struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static const struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}
/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static const struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static const struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static const struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	kfree(ip_top);
	adev->discovery.ip_top = NULL;
}

static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
{
	uint8_t harvest = 0;

	/* Until a uniform way is figured out, get the mask based on hw_id */
	switch (hw_id) {
	case VCN_HWID:
		/* VCN vs UVD+VCE */
		if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
			harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
		break;
	case DMU_HWID:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
			harvest = 0x1;
		break;
	case UMC_HWID:
		/* TODO: It needs another parsing; for now, ignore.*/
		break;
	case GC_HWID:
		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
		break;
	case SDMA0_HWID:
		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
		break;
	default:
		break;
	}

	return harvest;
}
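/*
 * Build the sysfs hierarchy for one die: for every HW ID present, a kset
 * directory is registered once, and each instance of that IP is then added
 * as a kobject beneath it, carrying the attributes defined above.
 */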
static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	int ii, jj, kk, res;
	uint16_t hw_id;
	uint8_t inst;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip_v4 *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip_v4 *)(discovery_bin + ip_offset);
			inst = ip->instance_number;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
			    hw_id != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->instance_number;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest =
				amdgpu_discovery_get_harvest_info(
					adev, ip_hw_instance->hw_id,
					ip_hw_instance->num_instance);
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
				if (reg_base_64)
					ip_hw_instance->base_addr[kk] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
				else
					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
			}

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			if (reg_base_64)
				ip_offset += struct_size(ip, base_address_64,
							 ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address,
							 ip->num_base_address);
		}
	}

	return 0;
}
static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct ip_discovery_top *ip_top = adev->discovery.ip_top;
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header *)(discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
	}

	return 0;
}

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct ip_discovery_top *ip_top;
	struct kset *die_kset;
	int res, ii;

	if (!discovery_bin)
		return -EINVAL;

	ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL);
	if (!ip_top)
		return -ENOMEM;

	ip_top->adev = adev;
	adev->discovery.ip_top = ip_top;
	res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}
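/*
 * Teardown walks the hierarchy bottom-up: instances, then per-hw_id ksets,
 * then die entries, then the top-level kobject.  Note the kset lock is
 * dropped around each kobject_put(), presumably because the final put may
 * sleep while removing the object's sysfs entries.
 */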
static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct ip_discovery_top *ip_top = adev->discovery.ip_top;
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&ip_top->die_kset.kobj);
	kobject_put(&ip_top->kobj);
}

/* ================================================== */
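/*
 * Core parser: walk every die and every IP instance in the discovery
 * table, record instance masks (GC/SDMA/VCN/VPE/...), convert base
 * addresses to CPU endianness in place, and populate adev->reg_offset[]
 * and adev->ip_versions[] for the rest of the driver.
 */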
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	uint8_t num_base_address, subrev, variant;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	uint8_t *discovery_bin;
	struct ip_v4 *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint32_t wafl_ver;
	uint16_t num_ips;
	uint16_t hw_id;
	uint8_t inst;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r)
		return r;
	discovery_bin = adev->discovery.bin;
	wafl_ver = 0;
	adev->gfx.xcc_mask = 0;
	adev->sdma.sdma_mask = 0;
	adev->vcn.inst_mask = 0;
	adev->jpeg.inst_mask = 0;
	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header *)(discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
				  le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
			  le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(discovery_bin + ip_offset);

			inst = ip->instance_number;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->instance_number,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				if (adev->vcn.num_vcn_inst <
				    AMDGPU_MAX_VCN_INSTANCES) {
					adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
						ip->revision & 0xc0;
					adev->vcn.num_vcn_inst++;
					adev->vcn.inst_mask |=
						(1U << ip->instance_number);
					adev->jpeg.inst_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
				}
				ip->revision &= ~0xc0;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances <
				    AMDGPU_MAX_SDMA_INSTANCES) {
					adev->sdma.num_instances++;
					adev->sdma.sdma_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
				}
			}

			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
					adev->vpe.num_instances++;
				else
					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
						adev->vpe.num_instances + 1,
						AMDGPU_MAX_VPE_INSTANCES);
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
				adev->gmc.num_umc++;
				adev->umc.node_inst_num++;
			}

			if (le16_to_cpu(ip->hw_id) == GC_HWID)
				adev->gfx.xcc_mask |=
					(1U << ip->instance_number);

			if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
				wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
							   ip->revision, 0, 0);

			for (k = 0; k < num_base_address; k++) {
				/*
				 * Convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				if (ihdr->base_addr_64_bit)
					/* Truncate the 64bit base address from ip discovery
					 * and only store the lower 32bit ip base in reg_offset[].
					 * Bits > 32 follow an ASIC-specific format, thus just
					 * discard them and handle them within the specific ASIC.
					 * This way reg_offset[] and related helpers can
					 * stay unchanged.
					 * The base address is in dwords, thus clear the
					 * highest 2 bits to store.
					 */
					ip->base_address[k] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
				else
					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
				    hw_id_map[hw_ip] != 0) {
					DRM_DEBUG("set register base offset for %s\n",
						  hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->instance_number] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example. Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
					 * example. On most chips there are multiple instances
					 * with the same HWID.
					 */

					if (ihdr->version < 3) {
						subrev = 0;
						variant = 0;
					} else {
						subrev = ip->sub_revision;
						variant = ip->variant;
					}

					adev->ip_versions[hw_ip]
							 [ip->instance_number] =
						IP_VERSION_FULL(ip->major,
								ip->minor,
								ip->revision,
								variant,
								subrev);
				}
			}

next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
		adev->ip_versions[XGMI_HWIP][0] = wafl_ver;

	return 0;
}

static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct ip_discovery_header *ihdr;
	struct binary_header *bhdr;
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;
	uint16_t offset, ihdr_ver;

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
	ihdr = (struct ip_discovery_header *)(discovery_bin + offset);
	ihdr_ver = le16_to_cpu(ihdr->version);
	/*
	 * The harvest table does not fit Navi1x and legacy GPUs,
	 * so read the harvest bit per IP data structure to set
	 * the harvest configuration.
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
	    ihdr_ver <= 2) {
		if ((adev->pdev->device == 0x731E &&
		     (adev->pdev->revision == 0xC6 ||
		      adev->pdev->revision == 0xC7)) ||
		    (adev->pdev->device == 0x7340 &&
		     adev->pdev->revision == 0xC9) ||
		    (adev->pdev->device == 0x7360 &&
		     adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
								 &vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc) {
		adev->gmc.num_umc -= umc_harvest_count;
	}
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v1_3 v1_3;
	struct gc_info_v2_0 v2;
	struct gc_info_v2_1 v2_1;
};
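/*
 * Note on the GC table layouts: v1.x describes WGP-based parts (the CU
 * count is derived as 2 * (wgp0 + wgp1) per SA), while v2.x covers
 * CU-based parts.  Minor versions only append fields, which is why the
 * parsing below gates each group of reads on header.version_minor.
 */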
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
						 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
			adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
			adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
			adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
			adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
						 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}
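/*
 * MALL (the GPU's memory-attached last level cache, marketed as Infinity
 * Cache on recent parts) is sized per UMC instance; the v1 table
 * additionally flags instances that should be counted twice (m_s_present)
 * or at half size (m_half_use).
 */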
union mall_info {
	struct mall_info_v1_0 v1;
	struct mall_info_v2_0 v2;
};

static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		adev->gmc.m_half_use = half_use;
		break;
	case 2:
		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}
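/*
 * The VCN info table carries per-instance fuse data; the driver only
 * consumes the codec disable mask from it (see
 * vcn_codec_disable_mask below).
 */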
union vcn_info {
	struct vcn_info_v1_0 v1;
};

static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union vcn_info *vcn_info;
	u16 offset;
	int v;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
	 * but that may change in the future with new GPUs so keep this
	 * check for defensive purposes.
	 */
	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
		dev_err(adev->dev, "invalid vcn instances\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);

	if (!offset)
		return 0;

	vcn_info = (union vcn_info *)(discovery_bin + offset);

	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
	case 1:
		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
		 * so this won't overflow.
		 */
		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
			adev->vcn.inst[v].vcn_codec_disable_mask =
				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled VCN info table %d.%d\n",
			le16_to_cpu(vcn_info->v1.header.version_major),
			le16_to_cpu(vcn_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union nps_info {
	struct nps_info_v1_0 v1;
};

static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
					     union nps_info *nps_data)
{
	uint64_t vram_size, pos, offset;
	struct nps_info_header *nhdr;
	struct binary_header bhdr;
	uint16_t checksum;

	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	pos = vram_size - DISCOVERY_TMR_OFFSET;
	amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);

	offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
	checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);

	amdgpu_device_vram_access(adev, (pos + offset), nps_data,
				  sizeof(*nps_data), false);

	nhdr = (struct nps_info_header *)(nps_data);
	if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
					      le32_to_cpu(nhdr->size_bytes),
					      checksum)) {
		dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
		return -EINVAL;
	}

	return 0;
}
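/*
 * NPS (memory partitioning) ranges: on partition-capable parts the NPS
 * table describes the base/limit address of each memory range.  When
 * 'refresh' is requested (e.g. after a partition mode switch) the table
 * is re-read straight from the TMR in VRAM by the helper above, rather
 * than from the cached binary.
 */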
1891 return r; 1892 nps_info = &nps_data; 1893 } else { 1894 if (!discovery_bin) { 1895 dev_err(adev->dev, 1896 "fetch mem range failed, ip discovery uninitialized\n"); 1897 return -EINVAL; 1898 } 1899 1900 bhdr = (struct binary_header *)discovery_bin; 1901 offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset); 1902 1903 if (!offset) 1904 return -ENOENT; 1905 1906 /* If verification fails, return as if NPS table doesn't exist */ 1907 if (amdgpu_discovery_verify_npsinfo(adev, bhdr)) 1908 return -ENOENT; 1909 1910 nps_info = (union nps_info *)(discovery_bin + offset); 1911 } 1912 1913 switch (le16_to_cpu(nps_info->v1.header.version_major)) { 1914 case 1: 1915 mem_ranges = kvcalloc(nps_info->v1.count, 1916 sizeof(*mem_ranges), 1917 GFP_KERNEL); 1918 if (!mem_ranges) 1919 return -ENOMEM; 1920 *nps_type = nps_info->v1.nps_type; 1921 *range_cnt = nps_info->v1.count; 1922 for (i = 0; i < *range_cnt; i++) { 1923 mem_ranges[i].base_address = 1924 nps_info->v1.instance_info[i].base_address; 1925 mem_ranges[i].limit_address = 1926 nps_info->v1.instance_info[i].limit_address; 1927 mem_ranges[i].nid_mask = -1; 1928 mem_ranges[i].flags = 0; 1929 } 1930 *ranges = mem_ranges; 1931 break; 1932 default: 1933 dev_err(adev->dev, "Unhandled NPS info table %d.%d\n", 1934 le16_to_cpu(nps_info->v1.header.version_major), 1935 le16_to_cpu(nps_info->v1.header.version_minor)); 1936 return -EINVAL; 1937 } 1938 1939 return 0; 1940 } 1941 1942 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) 1943 { 1944 /* what IP to use for this? */ 1945 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1946 case IP_VERSION(9, 0, 1): 1947 case IP_VERSION(9, 1, 0): 1948 case IP_VERSION(9, 2, 1): 1949 case IP_VERSION(9, 2, 2): 1950 case IP_VERSION(9, 3, 0): 1951 case IP_VERSION(9, 4, 0): 1952 case IP_VERSION(9, 4, 1): 1953 case IP_VERSION(9, 4, 2): 1954 case IP_VERSION(9, 4, 3): 1955 case IP_VERSION(9, 4, 4): 1956 case IP_VERSION(9, 5, 0): 1957 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); 1958 break; 1959 case IP_VERSION(10, 1, 10): 1960 case IP_VERSION(10, 1, 1): 1961 case IP_VERSION(10, 1, 2): 1962 case IP_VERSION(10, 1, 3): 1963 case IP_VERSION(10, 1, 4): 1964 case IP_VERSION(10, 3, 0): 1965 case IP_VERSION(10, 3, 1): 1966 case IP_VERSION(10, 3, 2): 1967 case IP_VERSION(10, 3, 3): 1968 case IP_VERSION(10, 3, 4): 1969 case IP_VERSION(10, 3, 5): 1970 case IP_VERSION(10, 3, 6): 1971 case IP_VERSION(10, 3, 7): 1972 amdgpu_device_ip_block_add(adev, &nv_common_ip_block); 1973 break; 1974 case IP_VERSION(11, 0, 0): 1975 case IP_VERSION(11, 0, 1): 1976 case IP_VERSION(11, 0, 2): 1977 case IP_VERSION(11, 0, 3): 1978 case IP_VERSION(11, 0, 4): 1979 case IP_VERSION(11, 5, 0): 1980 case IP_VERSION(11, 5, 1): 1981 case IP_VERSION(11, 5, 2): 1982 case IP_VERSION(11, 5, 3): 1983 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block); 1984 break; 1985 case IP_VERSION(12, 0, 0): 1986 case IP_VERSION(12, 0, 1): 1987 amdgpu_device_ip_block_add(adev, &soc24_common_ip_block); 1988 break; 1989 default: 1990 dev_err(adev->dev, 1991 "Failed to add common ip block(GC_HWIP:0x%x)\n", 1992 amdgpu_ip_version(adev, GC_HWIP, 0)); 1993 return -EINVAL; 1994 } 1995 return 0; 1996 } 1997 1998 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) 1999 { 2000 /* use GC or MMHUB IP version */ 2001 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2002 case IP_VERSION(9, 0, 1): 2003 case IP_VERSION(9, 1, 0): 2004 case IP_VERSION(9, 2, 1): 2005 case IP_VERSION(9, 2, 2): 2006 case IP_VERSION(9, 3, 0): 2007 case 
IP_VERSION(9, 4, 0): 2008 case IP_VERSION(9, 4, 1): 2009 case IP_VERSION(9, 4, 2): 2010 case IP_VERSION(9, 4, 3): 2011 case IP_VERSION(9, 4, 4): 2012 case IP_VERSION(9, 5, 0): 2013 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 2014 break; 2015 case IP_VERSION(10, 1, 10): 2016 case IP_VERSION(10, 1, 1): 2017 case IP_VERSION(10, 1, 2): 2018 case IP_VERSION(10, 1, 3): 2019 case IP_VERSION(10, 1, 4): 2020 case IP_VERSION(10, 3, 0): 2021 case IP_VERSION(10, 3, 1): 2022 case IP_VERSION(10, 3, 2): 2023 case IP_VERSION(10, 3, 3): 2024 case IP_VERSION(10, 3, 4): 2025 case IP_VERSION(10, 3, 5): 2026 case IP_VERSION(10, 3, 6): 2027 case IP_VERSION(10, 3, 7): 2028 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); 2029 break; 2030 case IP_VERSION(11, 0, 0): 2031 case IP_VERSION(11, 0, 1): 2032 case IP_VERSION(11, 0, 2): 2033 case IP_VERSION(11, 0, 3): 2034 case IP_VERSION(11, 0, 4): 2035 case IP_VERSION(11, 5, 0): 2036 case IP_VERSION(11, 5, 1): 2037 case IP_VERSION(11, 5, 2): 2038 case IP_VERSION(11, 5, 3): 2039 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block); 2040 break; 2041 case IP_VERSION(12, 0, 0): 2042 case IP_VERSION(12, 0, 1): 2043 amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block); 2044 break; 2045 default: 2046 dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n", 2047 amdgpu_ip_version(adev, GC_HWIP, 0)); 2048 return -EINVAL; 2049 } 2050 return 0; 2051 } 2052 2053 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) 2054 { 2055 switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) { 2056 case IP_VERSION(4, 0, 0): 2057 case IP_VERSION(4, 0, 1): 2058 case IP_VERSION(4, 1, 0): 2059 case IP_VERSION(4, 1, 1): 2060 case IP_VERSION(4, 3, 0): 2061 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 2062 break; 2063 case IP_VERSION(4, 2, 0): 2064 case IP_VERSION(4, 2, 1): 2065 case IP_VERSION(4, 4, 0): 2066 case IP_VERSION(4, 4, 2): 2067 case IP_VERSION(4, 4, 5): 2068 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); 2069 break; 2070 case IP_VERSION(5, 0, 0): 2071 case IP_VERSION(5, 0, 1): 2072 case IP_VERSION(5, 0, 2): 2073 case IP_VERSION(5, 0, 3): 2074 case IP_VERSION(5, 2, 0): 2075 case IP_VERSION(5, 2, 1): 2076 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); 2077 break; 2078 case IP_VERSION(6, 0, 0): 2079 case IP_VERSION(6, 0, 1): 2080 case IP_VERSION(6, 0, 2): 2081 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block); 2082 break; 2083 case IP_VERSION(6, 1, 0): 2084 amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block); 2085 break; 2086 case IP_VERSION(7, 0, 0): 2087 amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block); 2088 break; 2089 default: 2090 dev_err(adev->dev, 2091 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", 2092 amdgpu_ip_version(adev, OSSSYS_HWIP, 0)); 2093 return -EINVAL; 2094 } 2095 return 0; 2096 } 2097 2098 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) 2099 { 2100 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 2101 case IP_VERSION(9, 0, 0): 2102 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); 2103 break; 2104 case IP_VERSION(10, 0, 0): 2105 case IP_VERSION(10, 0, 1): 2106 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); 2107 break; 2108 case IP_VERSION(11, 0, 0): 2109 case IP_VERSION(11, 0, 2): 2110 case IP_VERSION(11, 0, 4): 2111 case IP_VERSION(11, 0, 5): 2112 case IP_VERSION(11, 0, 9): 2113 case IP_VERSION(11, 0, 7): 2114 case IP_VERSION(11, 0, 11): 2115 case IP_VERSION(11, 0, 12): 2116 case IP_VERSION(11, 0, 13): 2117 case IP_VERSION(11, 5, 0): 2118 case IP_VERSION(11, 5, 
2): 2119 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); 2120 break; 2121 case IP_VERSION(11, 0, 8): 2122 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block); 2123 break; 2124 case IP_VERSION(11, 0, 3): 2125 case IP_VERSION(12, 0, 1): 2126 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); 2127 break; 2128 case IP_VERSION(13, 0, 0): 2129 case IP_VERSION(13, 0, 1): 2130 case IP_VERSION(13, 0, 2): 2131 case IP_VERSION(13, 0, 3): 2132 case IP_VERSION(13, 0, 5): 2133 case IP_VERSION(13, 0, 6): 2134 case IP_VERSION(13, 0, 7): 2135 case IP_VERSION(13, 0, 8): 2136 case IP_VERSION(13, 0, 10): 2137 case IP_VERSION(13, 0, 11): 2138 case IP_VERSION(13, 0, 12): 2139 case IP_VERSION(13, 0, 14): 2140 case IP_VERSION(14, 0, 0): 2141 case IP_VERSION(14, 0, 1): 2142 case IP_VERSION(14, 0, 4): 2143 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); 2144 break; 2145 case IP_VERSION(13, 0, 4): 2146 amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block); 2147 break; 2148 case IP_VERSION(14, 0, 2): 2149 case IP_VERSION(14, 0, 3): 2150 case IP_VERSION(14, 0, 5): 2151 amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block); 2152 break; 2153 default: 2154 dev_err(adev->dev, 2155 "Failed to add psp ip block(MP0_HWIP:0x%x)\n", 2156 amdgpu_ip_version(adev, MP0_HWIP, 0)); 2157 return -EINVAL; 2158 } 2159 return 0; 2160 } 2161 2162 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) 2163 { 2164 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 2165 case IP_VERSION(9, 0, 0): 2166 case IP_VERSION(10, 0, 0): 2167 case IP_VERSION(10, 0, 1): 2168 case IP_VERSION(11, 0, 2): 2169 if (adev->asic_type == CHIP_ARCTURUS) 2170 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 2171 else 2172 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 2173 break; 2174 case IP_VERSION(11, 0, 0): 2175 case IP_VERSION(11, 0, 5): 2176 case IP_VERSION(11, 0, 9): 2177 case IP_VERSION(11, 0, 7): 2178 case IP_VERSION(11, 0, 11): 2179 case IP_VERSION(11, 0, 12): 2180 case IP_VERSION(11, 0, 13): 2181 case IP_VERSION(11, 5, 0): 2182 case IP_VERSION(11, 5, 2): 2183 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 2184 break; 2185 case IP_VERSION(11, 0, 8): 2186 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) 2187 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 2188 break; 2189 case IP_VERSION(12, 0, 0): 2190 case IP_VERSION(12, 0, 1): 2191 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); 2192 break; 2193 case IP_VERSION(13, 0, 0): 2194 case IP_VERSION(13, 0, 1): 2195 case IP_VERSION(13, 0, 2): 2196 case IP_VERSION(13, 0, 3): 2197 case IP_VERSION(13, 0, 4): 2198 case IP_VERSION(13, 0, 5): 2199 case IP_VERSION(13, 0, 6): 2200 case IP_VERSION(13, 0, 7): 2201 case IP_VERSION(13, 0, 8): 2202 case IP_VERSION(13, 0, 10): 2203 case IP_VERSION(13, 0, 11): 2204 case IP_VERSION(13, 0, 14): 2205 case IP_VERSION(13, 0, 12): 2206 amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); 2207 break; 2208 case IP_VERSION(14, 0, 0): 2209 case IP_VERSION(14, 0, 1): 2210 case IP_VERSION(14, 0, 2): 2211 case IP_VERSION(14, 0, 3): 2212 case IP_VERSION(14, 0, 4): 2213 case IP_VERSION(14, 0, 5): 2214 amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block); 2215 break; 2216 default: 2217 dev_err(adev->dev, 2218 "Failed to add smu ip block(MP1_HWIP:0x%x)\n", 2219 amdgpu_ip_version(adev, MP1_HWIP, 0)); 2220 return -EINVAL; 2221 } 2222 return 0; 2223 } 2224 2225 #if defined(CONFIG_DRM_AMD_DC) 2226 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev) 2227 { 2228 
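	/* Under SR-IOV the guest has no direct access to the display
	 * hardware: flag virtual display support and register the virtual
	 * KMS (vkms) IP block in place of DC.
	 */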
amdgpu_device_set_sriov_virtual_display(adev); 2229 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 2230 } 2231 #endif 2232 2233 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) 2234 { 2235 if (adev->enable_virtual_display) { 2236 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 2237 return 0; 2238 } 2239 2240 if (!amdgpu_device_has_dc_support(adev)) 2241 return 0; 2242 2243 #if defined(CONFIG_DRM_AMD_DC) 2244 if (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2245 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2246 case IP_VERSION(1, 0, 0): 2247 case IP_VERSION(1, 0, 1): 2248 case IP_VERSION(2, 0, 2): 2249 case IP_VERSION(2, 0, 0): 2250 case IP_VERSION(2, 0, 3): 2251 case IP_VERSION(2, 1, 0): 2252 case IP_VERSION(3, 0, 0): 2253 case IP_VERSION(3, 0, 2): 2254 case IP_VERSION(3, 0, 3): 2255 case IP_VERSION(3, 0, 1): 2256 case IP_VERSION(3, 1, 2): 2257 case IP_VERSION(3, 1, 3): 2258 case IP_VERSION(3, 1, 4): 2259 case IP_VERSION(3, 1, 5): 2260 case IP_VERSION(3, 1, 6): 2261 case IP_VERSION(3, 2, 0): 2262 case IP_VERSION(3, 2, 1): 2263 case IP_VERSION(3, 5, 0): 2264 case IP_VERSION(3, 5, 1): 2265 case IP_VERSION(3, 6, 0): 2266 case IP_VERSION(4, 1, 0): 2267 /* TODO: Fix IP version. DC code expects version 4.0.1 */ 2268 if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0)) 2269 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1); 2270 2271 if (amdgpu_sriov_vf(adev)) 2272 amdgpu_discovery_set_sriov_display(adev); 2273 else 2274 amdgpu_device_ip_block_add(adev, &dm_ip_block); 2275 break; 2276 default: 2277 dev_err(adev->dev, 2278 "Failed to add dm ip block(DCE_HWIP:0x%x)\n", 2279 amdgpu_ip_version(adev, DCE_HWIP, 0)); 2280 return -EINVAL; 2281 } 2282 } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) { 2283 switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) { 2284 case IP_VERSION(12, 0, 0): 2285 case IP_VERSION(12, 0, 1): 2286 case IP_VERSION(12, 1, 0): 2287 if (amdgpu_sriov_vf(adev)) 2288 amdgpu_discovery_set_sriov_display(adev); 2289 else 2290 amdgpu_device_ip_block_add(adev, &dm_ip_block); 2291 break; 2292 default: 2293 dev_err(adev->dev, 2294 "Failed to add dm ip block(DCI_HWIP:0x%x)\n", 2295 amdgpu_ip_version(adev, DCI_HWIP, 0)); 2296 return -EINVAL; 2297 } 2298 } 2299 #endif 2300 return 0; 2301 } 2302 2303 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) 2304 { 2305 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2306 case IP_VERSION(9, 0, 1): 2307 case IP_VERSION(9, 1, 0): 2308 case IP_VERSION(9, 2, 1): 2309 case IP_VERSION(9, 2, 2): 2310 case IP_VERSION(9, 3, 0): 2311 case IP_VERSION(9, 4, 0): 2312 case IP_VERSION(9, 4, 1): 2313 case IP_VERSION(9, 4, 2): 2314 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); 2315 break; 2316 case IP_VERSION(9, 4, 3): 2317 case IP_VERSION(9, 4, 4): 2318 case IP_VERSION(9, 5, 0): 2319 amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block); 2320 break; 2321 case IP_VERSION(10, 1, 10): 2322 case IP_VERSION(10, 1, 2): 2323 case IP_VERSION(10, 1, 1): 2324 case IP_VERSION(10, 1, 3): 2325 case IP_VERSION(10, 1, 4): 2326 case IP_VERSION(10, 3, 0): 2327 case IP_VERSION(10, 3, 2): 2328 case IP_VERSION(10, 3, 1): 2329 case IP_VERSION(10, 3, 4): 2330 case IP_VERSION(10, 3, 5): 2331 case IP_VERSION(10, 3, 6): 2332 case IP_VERSION(10, 3, 3): 2333 case IP_VERSION(10, 3, 7): 2334 amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); 2335 break; 2336 case IP_VERSION(11, 0, 0): 2337 case IP_VERSION(11, 0, 1): 2338 case IP_VERSION(11, 0, 2): 2339 case IP_VERSION(11, 0, 3): 2340 case IP_VERSION(11, 0, 4): 
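	/* GC 11.5.x (APU) reuses the GFX 11.0 block */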
2341 case IP_VERSION(11, 5, 0): 2342 case IP_VERSION(11, 5, 1): 2343 case IP_VERSION(11, 5, 2): 2344 case IP_VERSION(11, 5, 3): 2345 amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block); 2346 break; 2347 case IP_VERSION(12, 0, 0): 2348 case IP_VERSION(12, 0, 1): 2349 amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block); 2350 break; 2351 default: 2352 dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n", 2353 amdgpu_ip_version(adev, GC_HWIP, 0)); 2354 return -EINVAL; 2355 } 2356 return 0; 2357 } 2358 2359 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) 2360 { 2361 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 2362 case IP_VERSION(4, 0, 0): 2363 case IP_VERSION(4, 0, 1): 2364 case IP_VERSION(4, 1, 0): 2365 case IP_VERSION(4, 1, 1): 2366 case IP_VERSION(4, 1, 2): 2367 case IP_VERSION(4, 2, 0): 2368 case IP_VERSION(4, 2, 2): 2369 case IP_VERSION(4, 4, 0): 2370 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); 2371 break; 2372 case IP_VERSION(4, 4, 2): 2373 case IP_VERSION(4, 4, 5): 2374 case IP_VERSION(4, 4, 4): 2375 amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block); 2376 break; 2377 case IP_VERSION(5, 0, 0): 2378 case IP_VERSION(5, 0, 1): 2379 case IP_VERSION(5, 0, 2): 2380 case IP_VERSION(5, 0, 5): 2381 amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); 2382 break; 2383 case IP_VERSION(5, 2, 0): 2384 case IP_VERSION(5, 2, 2): 2385 case IP_VERSION(5, 2, 4): 2386 case IP_VERSION(5, 2, 5): 2387 case IP_VERSION(5, 2, 6): 2388 case IP_VERSION(5, 2, 3): 2389 case IP_VERSION(5, 2, 1): 2390 case IP_VERSION(5, 2, 7): 2391 amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); 2392 break; 2393 case IP_VERSION(6, 0, 0): 2394 case IP_VERSION(6, 0, 1): 2395 case IP_VERSION(6, 0, 2): 2396 case IP_VERSION(6, 0, 3): 2397 case IP_VERSION(6, 1, 0): 2398 case IP_VERSION(6, 1, 1): 2399 case IP_VERSION(6, 1, 2): 2400 case IP_VERSION(6, 1, 3): 2401 amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block); 2402 break; 2403 case IP_VERSION(7, 0, 0): 2404 case IP_VERSION(7, 0, 1): 2405 amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block); 2406 break; 2407 default: 2408 dev_err(adev->dev, 2409 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", 2410 amdgpu_ip_version(adev, SDMA0_HWIP, 0)); 2411 return -EINVAL; 2412 } 2413 2414 return 0; 2415 } 2416 2417 static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev) 2418 { 2419 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 2420 case IP_VERSION(13, 0, 6): 2421 case IP_VERSION(13, 0, 12): 2422 case IP_VERSION(13, 0, 14): 2423 amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block); 2424 break; 2425 default: 2426 break; 2427 } 2428 return 0; 2429 } 2430 2431 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) 2432 { 2433 if (amdgpu_ip_version(adev, VCE_HWIP, 0)) { 2434 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 2435 case IP_VERSION(7, 0, 0): 2436 case IP_VERSION(7, 2, 0): 2437 /* UVD is not supported on vega20 SR-IOV */ 2438 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 2439 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); 2440 break; 2441 default: 2442 dev_err(adev->dev, 2443 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", 2444 amdgpu_ip_version(adev, UVD_HWIP, 0)); 2445 return -EINVAL; 2446 } 2447 switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) { 2448 case IP_VERSION(4, 0, 0): 2449 case IP_VERSION(4, 1, 0): 2450 /* VCE is not supported on vega20 SR-IOV */ 2451 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 2452 
amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); 2453 break; 2454 default: 2455 dev_err(adev->dev, 2456 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", 2457 amdgpu_ip_version(adev, VCE_HWIP, 0)); 2458 return -EINVAL; 2459 } 2460 } else { 2461 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 2462 case IP_VERSION(1, 0, 0): 2463 case IP_VERSION(1, 0, 1): 2464 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); 2465 break; 2466 case IP_VERSION(2, 0, 0): 2467 case IP_VERSION(2, 0, 2): 2468 case IP_VERSION(2, 2, 0): 2469 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); 2470 if (!amdgpu_sriov_vf(adev)) 2471 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); 2472 break; 2473 case IP_VERSION(2, 0, 3): 2474 break; 2475 case IP_VERSION(2, 5, 0): 2476 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); 2477 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); 2478 break; 2479 case IP_VERSION(2, 6, 0): 2480 amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block); 2481 amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block); 2482 break; 2483 case IP_VERSION(3, 0, 0): 2484 case IP_VERSION(3, 0, 16): 2485 case IP_VERSION(3, 1, 1): 2486 case IP_VERSION(3, 1, 2): 2487 case IP_VERSION(3, 0, 2): 2488 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 2489 if (!amdgpu_sriov_vf(adev)) 2490 amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); 2491 break; 2492 case IP_VERSION(3, 0, 33): 2493 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 2494 break; 2495 case IP_VERSION(4, 0, 0): 2496 case IP_VERSION(4, 0, 2): 2497 case IP_VERSION(4, 0, 4): 2498 amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block); 2499 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block); 2500 break; 2501 case IP_VERSION(4, 0, 3): 2502 amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block); 2503 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block); 2504 break; 2505 case IP_VERSION(4, 0, 5): 2506 case IP_VERSION(4, 0, 6): 2507 amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block); 2508 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block); 2509 break; 2510 case IP_VERSION(5, 0, 0): 2511 amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block); 2512 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block); 2513 break; 2514 case IP_VERSION(5, 0, 1): 2515 amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block); 2516 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block); 2517 break; 2518 default: 2519 dev_err(adev->dev, 2520 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", 2521 amdgpu_ip_version(adev, UVD_HWIP, 0)); 2522 return -EINVAL; 2523 } 2524 } 2525 return 0; 2526 } 2527 2528 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) 2529 { 2530 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2531 case IP_VERSION(11, 0, 0): 2532 case IP_VERSION(11, 0, 1): 2533 case IP_VERSION(11, 0, 2): 2534 case IP_VERSION(11, 0, 3): 2535 case IP_VERSION(11, 0, 4): 2536 case IP_VERSION(11, 5, 0): 2537 case IP_VERSION(11, 5, 1): 2538 case IP_VERSION(11, 5, 2): 2539 case IP_VERSION(11, 5, 3): 2540 amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block); 2541 adev->enable_mes = true; 2542 adev->enable_mes_kiq = true; 2543 break; 2544 case IP_VERSION(12, 0, 0): 2545 case IP_VERSION(12, 0, 1): 2546 amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block); 2547 adev->enable_mes = true; 2548 adev->enable_mes_kiq = true; 2549 if (amdgpu_uni_mes) 2550 adev->enable_uni_mes = true; 2551 break; 2552 default: 2553 break; 2554 } 2555 return 0; 2556 } 2557 2558 static void amdgpu_discovery_init_soc_config(struct amdgpu_device 
*adev) 2559 { 2560 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2561 case IP_VERSION(9, 4, 3): 2562 case IP_VERSION(9, 4, 4): 2563 case IP_VERSION(9, 5, 0): 2564 aqua_vanjaram_init_soc_config(adev); 2565 break; 2566 default: 2567 break; 2568 } 2569 } 2570 2571 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev) 2572 { 2573 switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { 2574 case IP_VERSION(6, 1, 0): 2575 case IP_VERSION(6, 1, 1): 2576 case IP_VERSION(6, 1, 3): 2577 amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block); 2578 break; 2579 default: 2580 break; 2581 } 2582 2583 return 0; 2584 } 2585 2586 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev) 2587 { 2588 switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { 2589 case IP_VERSION(4, 0, 5): 2590 case IP_VERSION(4, 0, 6): 2591 if (amdgpu_umsch_mm & 0x1) { 2592 amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block); 2593 adev->enable_umsch_mm = true; 2594 } 2595 break; 2596 default: 2597 break; 2598 } 2599 2600 return 0; 2601 } 2602 2603 static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev) 2604 { 2605 #if defined(CONFIG_DRM_AMD_ISP) 2606 switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) { 2607 case IP_VERSION(4, 1, 0): 2608 amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block); 2609 break; 2610 case IP_VERSION(4, 1, 1): 2611 amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block); 2612 break; 2613 default: 2614 break; 2615 } 2616 #endif 2617 2618 return 0; 2619 } 2620 2621 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) 2622 { 2623 int r; 2624 2625 switch (adev->asic_type) { 2626 case CHIP_VEGA10: 2627 /* This is not fatal. We only need the discovery 2628 * binary for sysfs. We don't need it for a 2629 * functional system. 2630 */ 2631 amdgpu_discovery_init(adev); 2632 vega10_reg_base_init(adev); 2633 adev->sdma.num_instances = 2; 2634 adev->sdma.sdma_mask = 3; 2635 adev->gmc.num_umc = 4; 2636 adev->gfx.xcc_mask = 1; 2637 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0); 2638 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0); 2639 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); 2640 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0); 2641 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0); 2642 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0); 2643 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); 2644 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0); 2645 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0); 2646 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); 2647 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); 2648 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); 2649 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0); 2650 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1); 2651 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); 2652 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); 2653 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0); 2654 break; 2655 case CHIP_VEGA12: 2656 /* This is not fatal. We only need the discovery 2657 * binary for sysfs. We don't need it for a 2658 * functional system. 
2659 */ 2660 amdgpu_discovery_init(adev); 2661 vega10_reg_base_init(adev); 2662 adev->sdma.num_instances = 2; 2663 adev->sdma.sdma_mask = 3; 2664 adev->gmc.num_umc = 4; 2665 adev->gfx.xcc_mask = 1; 2666 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0); 2667 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0); 2668 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); 2669 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1); 2670 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1); 2671 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1); 2672 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0); 2673 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0); 2674 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0); 2675 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); 2676 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); 2677 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); 2678 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1); 2679 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1); 2680 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); 2681 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); 2682 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1); 2683 break; 2684 case CHIP_RAVEN: 2685 /* This is not fatal. We only need the discovery 2686 * binary for sysfs. We don't need it for a 2687 * functional system. 2688 */ 2689 amdgpu_discovery_init(adev); 2690 vega10_reg_base_init(adev); 2691 adev->sdma.num_instances = 1; 2692 adev->sdma.sdma_mask = 1; 2693 adev->vcn.num_vcn_inst = 1; 2694 adev->gmc.num_umc = 2; 2695 adev->gfx.xcc_mask = 1; 2696 if (adev->apu_flags & AMD_APU_IS_RAVEN2) { 2697 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0); 2698 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); 2699 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1); 2700 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1); 2701 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1); 2702 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1); 2703 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1); 2704 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0); 2705 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1); 2706 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1); 2707 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0); 2708 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1); 2709 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2); 2710 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1); 2711 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1); 2712 adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0); 2713 } else { 2714 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0); 2715 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0); 2716 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0); 2717 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0); 2718 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0); 2719 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); 2720 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0); 2721 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0); 2722 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0); 2723 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0); 2724 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0); 2725 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0); 2726 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0); 2727 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0); 2728 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0); 2729 
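			/* ISP 2.0.0 is common to both Raven revisions */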
adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0); 2730 } 2731 break; 2732 case CHIP_VEGA20: 2733 /* This is not fatal. We only need the discovery 2734 * binary for sysfs. We don't need it for a 2735 * functional system. 2736 */ 2737 amdgpu_discovery_init(adev); 2738 vega20_reg_base_init(adev); 2739 adev->sdma.num_instances = 2; 2740 adev->sdma.sdma_mask = 3; 2741 adev->gmc.num_umc = 8; 2742 adev->gfx.xcc_mask = 1; 2743 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0); 2744 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0); 2745 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); 2746 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0); 2747 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0); 2748 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0); 2749 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0); 2750 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0); 2751 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1); 2752 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2); 2753 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2754 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2); 2755 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2); 2756 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0); 2757 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0); 2758 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0); 2759 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0); 2760 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0); 2761 break; 2762 case CHIP_ARCTURUS: 2763 /* This is not fatal. We only need the discovery 2764 * binary for sysfs. We don't need it for a 2765 * functional system. 2766 */ 2767 amdgpu_discovery_init(adev); 2768 arct_reg_base_init(adev); 2769 adev->sdma.num_instances = 8; 2770 adev->sdma.sdma_mask = 0xff; 2771 adev->vcn.num_vcn_inst = 2; 2772 adev->gmc.num_umc = 8; 2773 adev->gfx.xcc_mask = 1; 2774 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1); 2775 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1); 2776 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); 2777 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1); 2778 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2); 2779 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2); 2780 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2); 2781 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2); 2782 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2); 2783 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2); 2784 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2); 2785 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2); 2786 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1); 2787 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1); 2788 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2); 2789 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4); 2790 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2791 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3); 2792 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3); 2793 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1); 2794 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0); 2795 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0); 2796 break; 2797 case CHIP_ALDEBARAN: 2798 /* This is not fatal. We only need the discovery 2799 * binary for sysfs. We don't need it for a 2800 * functional system. 
2801 */ 2802 amdgpu_discovery_init(adev); 2803 aldebaran_reg_base_init(adev); 2804 adev->sdma.num_instances = 5; 2805 adev->sdma.sdma_mask = 0x1f; 2806 adev->vcn.num_vcn_inst = 2; 2807 adev->gmc.num_umc = 4; 2808 adev->gfx.xcc_mask = 1; 2809 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2810 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2811 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); 2812 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0); 2813 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0); 2814 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0); 2815 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0); 2816 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0); 2817 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0); 2818 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2); 2819 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4); 2820 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0); 2821 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2); 2822 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2); 2823 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2); 2824 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2); 2825 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2); 2826 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0); 2827 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0); 2828 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); 2829 break; 2830 case CHIP_CYAN_SKILLFISH: 2831 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { 2832 r = amdgpu_discovery_reg_base_init(adev); 2833 if (r) 2834 return -EINVAL; 2835 2836 amdgpu_discovery_harvest_ip(adev); 2837 amdgpu_discovery_get_gfx_info(adev); 2838 amdgpu_discovery_get_mall_info(adev); 2839 amdgpu_discovery_get_vcn_info(adev); 2840 } else { 2841 cyan_skillfish_reg_base_init(adev); 2842 adev->sdma.num_instances = 2; 2843 adev->sdma.sdma_mask = 3; 2844 adev->gfx.xcc_mask = 1; 2845 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3); 2846 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3); 2847 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1); 2848 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1); 2849 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1); 2850 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1); 2851 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0); 2852 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1); 2853 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1); 2854 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8); 2855 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8); 2856 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1); 2857 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8); 2858 adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3); 2859 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3); 2860 } 2861 break; 2862 default: 2863 r = amdgpu_discovery_reg_base_init(adev); 2864 if (r) { 2865 drm_err(&adev->ddev, "discovery failed: %d\n", r); 2866 return r; 2867 } 2868 2869 amdgpu_discovery_harvest_ip(adev); 2870 amdgpu_discovery_get_gfx_info(adev); 2871 amdgpu_discovery_get_mall_info(adev); 2872 amdgpu_discovery_get_vcn_info(adev); 2873 break; 2874 } 2875 2876 amdgpu_discovery_init_soc_config(adev); 2877 amdgpu_discovery_sysfs_init(adev); 2878 2879 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2880 case IP_VERSION(9, 0, 1): 2881 case IP_VERSION(9, 2, 1): 2882 case IP_VERSION(9, 4, 0): 2883 case IP_VERSION(9, 4, 1): 2884 case IP_VERSION(9, 4, 2): 2885 case IP_VERSION(9, 4, 3): 2886 case 
IP_VERSION(9, 4, 4): 2887 case IP_VERSION(9, 5, 0): 2888 adev->family = AMDGPU_FAMILY_AI; 2889 break; 2890 case IP_VERSION(9, 1, 0): 2891 case IP_VERSION(9, 2, 2): 2892 case IP_VERSION(9, 3, 0): 2893 adev->family = AMDGPU_FAMILY_RV; 2894 break; 2895 case IP_VERSION(10, 1, 10): 2896 case IP_VERSION(10, 1, 1): 2897 case IP_VERSION(10, 1, 2): 2898 case IP_VERSION(10, 1, 3): 2899 case IP_VERSION(10, 1, 4): 2900 case IP_VERSION(10, 3, 0): 2901 case IP_VERSION(10, 3, 2): 2902 case IP_VERSION(10, 3, 4): 2903 case IP_VERSION(10, 3, 5): 2904 adev->family = AMDGPU_FAMILY_NV; 2905 break; 2906 case IP_VERSION(10, 3, 1): 2907 adev->family = AMDGPU_FAMILY_VGH; 2908 adev->apu_flags |= AMD_APU_IS_VANGOGH; 2909 break; 2910 case IP_VERSION(10, 3, 3): 2911 adev->family = AMDGPU_FAMILY_YC; 2912 break; 2913 case IP_VERSION(10, 3, 6): 2914 adev->family = AMDGPU_FAMILY_GC_10_3_6; 2915 break; 2916 case IP_VERSION(10, 3, 7): 2917 adev->family = AMDGPU_FAMILY_GC_10_3_7; 2918 break; 2919 case IP_VERSION(11, 0, 0): 2920 case IP_VERSION(11, 0, 2): 2921 case IP_VERSION(11, 0, 3): 2922 adev->family = AMDGPU_FAMILY_GC_11_0_0; 2923 break; 2924 case IP_VERSION(11, 0, 1): 2925 case IP_VERSION(11, 0, 4): 2926 adev->family = AMDGPU_FAMILY_GC_11_0_1; 2927 break; 2928 case IP_VERSION(11, 5, 0): 2929 case IP_VERSION(11, 5, 1): 2930 case IP_VERSION(11, 5, 2): 2931 case IP_VERSION(11, 5, 3): 2932 adev->family = AMDGPU_FAMILY_GC_11_5_0; 2933 break; 2934 case IP_VERSION(12, 0, 0): 2935 case IP_VERSION(12, 0, 1): 2936 adev->family = AMDGPU_FAMILY_GC_12_0_0; 2937 break; 2938 default: 2939 return -EINVAL; 2940 } 2941 2942 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2943 case IP_VERSION(9, 1, 0): 2944 case IP_VERSION(9, 2, 2): 2945 case IP_VERSION(9, 3, 0): 2946 case IP_VERSION(10, 1, 3): 2947 case IP_VERSION(10, 1, 4): 2948 case IP_VERSION(10, 3, 1): 2949 case IP_VERSION(10, 3, 3): 2950 case IP_VERSION(10, 3, 6): 2951 case IP_VERSION(10, 3, 7): 2952 case IP_VERSION(11, 0, 1): 2953 case IP_VERSION(11, 0, 4): 2954 case IP_VERSION(11, 5, 0): 2955 case IP_VERSION(11, 5, 1): 2956 case IP_VERSION(11, 5, 2): 2957 case IP_VERSION(11, 5, 3): 2958 adev->flags |= AMD_IS_APU; 2959 break; 2960 default: 2961 break; 2962 } 2963 2964 /* set NBIO version */ 2965 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 2966 case IP_VERSION(6, 1, 0): 2967 case IP_VERSION(6, 2, 0): 2968 adev->nbio.funcs = &nbio_v6_1_funcs; 2969 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; 2970 break; 2971 case IP_VERSION(7, 0, 0): 2972 case IP_VERSION(7, 0, 1): 2973 case IP_VERSION(2, 5, 0): 2974 adev->nbio.funcs = &nbio_v7_0_funcs; 2975 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; 2976 break; 2977 case IP_VERSION(7, 4, 0): 2978 case IP_VERSION(7, 4, 1): 2979 case IP_VERSION(7, 4, 4): 2980 adev->nbio.funcs = &nbio_v7_4_funcs; 2981 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; 2982 break; 2983 case IP_VERSION(7, 9, 0): 2984 case IP_VERSION(7, 9, 1): 2985 adev->nbio.funcs = &nbio_v7_9_funcs; 2986 adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg; 2987 break; 2988 case IP_VERSION(7, 11, 0): 2989 case IP_VERSION(7, 11, 1): 2990 case IP_VERSION(7, 11, 2): 2991 case IP_VERSION(7, 11, 3): 2992 adev->nbio.funcs = &nbio_v7_11_funcs; 2993 adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg; 2994 break; 2995 case IP_VERSION(7, 2, 0): 2996 case IP_VERSION(7, 2, 1): 2997 case IP_VERSION(7, 3, 0): 2998 case IP_VERSION(7, 5, 0): 2999 case IP_VERSION(7, 5, 1): 3000 adev->nbio.funcs = &nbio_v7_2_funcs; 3001 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg; 3002 
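		/* NBIO 7.2.x, 7.3.0 and 7.5.x all use the v7.2 callbacks */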
break; 3003 case IP_VERSION(2, 1, 1): 3004 case IP_VERSION(2, 3, 0): 3005 case IP_VERSION(2, 3, 1): 3006 case IP_VERSION(2, 3, 2): 3007 case IP_VERSION(3, 3, 0): 3008 case IP_VERSION(3, 3, 1): 3009 case IP_VERSION(3, 3, 2): 3010 case IP_VERSION(3, 3, 3): 3011 adev->nbio.funcs = &nbio_v2_3_funcs; 3012 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; 3013 break; 3014 case IP_VERSION(4, 3, 0): 3015 case IP_VERSION(4, 3, 1): 3016 if (amdgpu_sriov_vf(adev)) 3017 adev->nbio.funcs = &nbio_v4_3_sriov_funcs; 3018 else 3019 adev->nbio.funcs = &nbio_v4_3_funcs; 3020 adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg; 3021 break; 3022 case IP_VERSION(7, 7, 0): 3023 case IP_VERSION(7, 7, 1): 3024 adev->nbio.funcs = &nbio_v7_7_funcs; 3025 adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg; 3026 break; 3027 case IP_VERSION(6, 3, 1): 3028 adev->nbio.funcs = &nbif_v6_3_1_funcs; 3029 adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg; 3030 break; 3031 default: 3032 break; 3033 } 3034 3035 switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) { 3036 case IP_VERSION(4, 0, 0): 3037 case IP_VERSION(4, 0, 1): 3038 case IP_VERSION(4, 1, 0): 3039 case IP_VERSION(4, 1, 1): 3040 case IP_VERSION(4, 1, 2): 3041 case IP_VERSION(4, 2, 0): 3042 case IP_VERSION(4, 2, 1): 3043 case IP_VERSION(4, 4, 0): 3044 case IP_VERSION(4, 4, 2): 3045 case IP_VERSION(4, 4, 5): 3046 adev->hdp.funcs = &hdp_v4_0_funcs; 3047 break; 3048 case IP_VERSION(5, 0, 0): 3049 case IP_VERSION(5, 0, 1): 3050 case IP_VERSION(5, 0, 2): 3051 case IP_VERSION(5, 0, 3): 3052 case IP_VERSION(5, 0, 4): 3053 case IP_VERSION(5, 2, 0): 3054 adev->hdp.funcs = &hdp_v5_0_funcs; 3055 break; 3056 case IP_VERSION(5, 2, 1): 3057 adev->hdp.funcs = &hdp_v5_2_funcs; 3058 break; 3059 case IP_VERSION(6, 0, 0): 3060 case IP_VERSION(6, 0, 1): 3061 case IP_VERSION(6, 1, 0): 3062 adev->hdp.funcs = &hdp_v6_0_funcs; 3063 break; 3064 case IP_VERSION(7, 0, 0): 3065 adev->hdp.funcs = &hdp_v7_0_funcs; 3066 break; 3067 default: 3068 break; 3069 } 3070 3071 switch (amdgpu_ip_version(adev, DF_HWIP, 0)) { 3072 case IP_VERSION(3, 6, 0): 3073 case IP_VERSION(3, 6, 1): 3074 case IP_VERSION(3, 6, 2): 3075 adev->df.funcs = &df_v3_6_funcs; 3076 break; 3077 case IP_VERSION(2, 1, 0): 3078 case IP_VERSION(2, 1, 1): 3079 case IP_VERSION(2, 5, 0): 3080 case IP_VERSION(3, 5, 1): 3081 case IP_VERSION(3, 5, 2): 3082 adev->df.funcs = &df_v1_7_funcs; 3083 break; 3084 case IP_VERSION(4, 3, 0): 3085 adev->df.funcs = &df_v4_3_funcs; 3086 break; 3087 case IP_VERSION(4, 6, 2): 3088 adev->df.funcs = &df_v4_6_2_funcs; 3089 break; 3090 case IP_VERSION(4, 15, 0): 3091 case IP_VERSION(4, 15, 1): 3092 adev->df.funcs = &df_v4_15_funcs; 3093 break; 3094 default: 3095 break; 3096 } 3097 3098 switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) { 3099 case IP_VERSION(9, 0, 0): 3100 case IP_VERSION(9, 0, 1): 3101 case IP_VERSION(10, 0, 0): 3102 case IP_VERSION(10, 0, 1): 3103 case IP_VERSION(10, 0, 2): 3104 adev->smuio.funcs = &smuio_v9_0_funcs; 3105 break; 3106 case IP_VERSION(11, 0, 0): 3107 case IP_VERSION(11, 0, 2): 3108 case IP_VERSION(11, 0, 3): 3109 case IP_VERSION(11, 0, 4): 3110 case IP_VERSION(11, 0, 7): 3111 case IP_VERSION(11, 0, 8): 3112 adev->smuio.funcs = &smuio_v11_0_funcs; 3113 break; 3114 case IP_VERSION(11, 0, 6): 3115 case IP_VERSION(11, 0, 10): 3116 case IP_VERSION(11, 0, 11): 3117 case IP_VERSION(11, 5, 0): 3118 case IP_VERSION(11, 5, 2): 3119 case IP_VERSION(13, 0, 1): 3120 case IP_VERSION(13, 0, 9): 3121 case IP_VERSION(13, 0, 10): 3122 adev->smuio.funcs = &smuio_v11_0_6_funcs; 3123 break; 3124 case 
IP_VERSION(13, 0, 2): 3125 adev->smuio.funcs = &smuio_v13_0_funcs; 3126 break; 3127 case IP_VERSION(13, 0, 3): 3128 case IP_VERSION(13, 0, 11): 3129 adev->smuio.funcs = &smuio_v13_0_3_funcs; 3130 if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) { 3131 adev->flags |= AMD_IS_APU; 3132 } 3133 break; 3134 case IP_VERSION(13, 0, 6): 3135 case IP_VERSION(13, 0, 8): 3136 case IP_VERSION(14, 0, 0): 3137 case IP_VERSION(14, 0, 1): 3138 adev->smuio.funcs = &smuio_v13_0_6_funcs; 3139 break; 3140 case IP_VERSION(14, 0, 2): 3141 adev->smuio.funcs = &smuio_v14_0_2_funcs; 3142 break; 3143 default: 3144 break; 3145 } 3146 3147 switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) { 3148 case IP_VERSION(6, 0, 0): 3149 case IP_VERSION(6, 0, 1): 3150 case IP_VERSION(6, 0, 2): 3151 case IP_VERSION(6, 0, 3): 3152 adev->lsdma.funcs = &lsdma_v6_0_funcs; 3153 break; 3154 case IP_VERSION(7, 0, 0): 3155 case IP_VERSION(7, 0, 1): 3156 adev->lsdma.funcs = &lsdma_v7_0_funcs; 3157 break; 3158 default: 3159 break; 3160 } 3161 3162 r = amdgpu_discovery_set_common_ip_blocks(adev); 3163 if (r) 3164 return r; 3165 3166 r = amdgpu_discovery_set_gmc_ip_blocks(adev); 3167 if (r) 3168 return r; 3169 3170 /* For SR-IOV, PSP needs to be initialized before IH */ 3171 if (amdgpu_sriov_vf(adev)) { 3172 r = amdgpu_discovery_set_psp_ip_blocks(adev); 3173 if (r) 3174 return r; 3175 r = amdgpu_discovery_set_ih_ip_blocks(adev); 3176 if (r) 3177 return r; 3178 } else { 3179 r = amdgpu_discovery_set_ih_ip_blocks(adev); 3180 if (r) 3181 return r; 3182 3183 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 3184 r = amdgpu_discovery_set_psp_ip_blocks(adev); 3185 if (r) 3186 return r; 3187 } 3188 } 3189 3190 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 3191 r = amdgpu_discovery_set_smu_ip_blocks(adev); 3192 if (r) 3193 return r; 3194 } 3195 3196 r = amdgpu_discovery_set_display_ip_blocks(adev); 3197 if (r) 3198 return r; 3199 3200 r = amdgpu_discovery_set_gc_ip_blocks(adev); 3201 if (r) 3202 return r; 3203 3204 r = amdgpu_discovery_set_sdma_ip_blocks(adev); 3205 if (r) 3206 return r; 3207 3208 r = amdgpu_discovery_set_ras_ip_blocks(adev); 3209 if (r) 3210 return r; 3211 3212 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && 3213 !amdgpu_sriov_vf(adev)) || 3214 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { 3215 r = amdgpu_discovery_set_smu_ip_blocks(adev); 3216 if (r) 3217 return r; 3218 } 3219 3220 r = amdgpu_discovery_set_mm_ip_blocks(adev); 3221 if (r) 3222 return r; 3223 3224 r = amdgpu_discovery_set_mes_ip_blocks(adev); 3225 if (r) 3226 return r; 3227 3228 r = amdgpu_discovery_set_vpe_ip_blocks(adev); 3229 if (r) 3230 return r; 3231 3232 r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev); 3233 if (r) 3234 return r; 3235 3236 r = amdgpu_discovery_set_isp_ip_blocks(adev); 3237 if (r) 3238 return r; 3239 return 0; 3240 } 3241 3242