/*
 * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
#include "amdgpu_ras.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_3.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "df_v4_6_2.h"
#include "df_v4_15.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "nbio_v7_9.h"
#include "nbio_v7_11.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gmc_v12_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "nbif_v6_3_1.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "hdp_v7_0.h"
#include "nv.h"
#include "soc21.h"
#include "soc24.h"
#include "soc_v1_0.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
#include "ih_v7_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "gfx_v12_0.h"
#include "gfx_v12_1.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "sdma_v7_0.h"
#include "sdma_v7_1.h"
#include "lsdma_v6_0.h"
#include "lsdma_v7_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "vcn_v4_0_3.h"
#include "jpeg_v4_0_3.h"
#include "vcn_v4_0_5.h"
#include "jpeg_v4_0_5.h"
#include "amdgpu_vkms.h"
#include "mes_v11_0.h"
#include "mes_v12_0.h"
#include "mes_v12_1.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
#include "smuio_v14_0_2.h"
#include "smuio_v15_0_8.h"
#include "vcn_v5_0_0.h"
#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
#include "jpeg_v5_0_1.h"
#include "jpeg_v5_3_0.h" 116 117 #include "amdgpu_ras_mgr.h" 118 119 #include "amdgpu_vpe.h" 120 #if defined(CONFIG_DRM_AMD_ISP) 121 #include "amdgpu_isp.h" 122 #endif 123 124 MODULE_FIRMWARE("amdgpu/ip_discovery.bin"); 125 MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin"); 126 MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin"); 127 MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin"); 128 MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin"); 129 MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin"); 130 MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin"); 131 MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin"); 132 MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin"); 133 134 #define mmIP_DISCOVERY_VERSION 0x16A00 135 #define mmRCC_CONFIG_MEMSIZE 0xde3 136 #define mmMP0_SMN_C2PMSG_33 0x16061 137 #define mmMM_INDEX 0x0 138 #define mmMM_INDEX_HI 0x6 139 #define mmMM_DATA 0x1 140 141 static const char *hw_id_names[HW_ID_MAX] = { 142 [MP1_HWID] = "MP1", 143 [MP2_HWID] = "MP2", 144 [THM_HWID] = "THM", 145 [SMUIO_HWID] = "SMUIO", 146 [FUSE_HWID] = "FUSE", 147 [CLKA_HWID] = "CLKA", 148 [PWR_HWID] = "PWR", 149 [GC_HWID] = "GC", 150 [UVD_HWID] = "UVD", 151 [AUDIO_AZ_HWID] = "AUDIO_AZ", 152 [ACP_HWID] = "ACP", 153 [DCI_HWID] = "DCI", 154 [DMU_HWID] = "DMU", 155 [DCO_HWID] = "DCO", 156 [DIO_HWID] = "DIO", 157 [XDMA_HWID] = "XDMA", 158 [DCEAZ_HWID] = "DCEAZ", 159 [DAZ_HWID] = "DAZ", 160 [SDPMUX_HWID] = "SDPMUX", 161 [NTB_HWID] = "NTB", 162 [IOHC_HWID] = "IOHC", 163 [L2IMU_HWID] = "L2IMU", 164 [VCE_HWID] = "VCE", 165 [MMHUB_HWID] = "MMHUB", 166 [ATHUB_HWID] = "ATHUB", 167 [DBGU_NBIO_HWID] = "DBGU_NBIO", 168 [DFX_HWID] = "DFX", 169 [DBGU0_HWID] = "DBGU0", 170 [DBGU1_HWID] = "DBGU1", 171 [OSSSYS_HWID] = "OSSSYS", 172 [HDP_HWID] = "HDP", 173 [SDMA0_HWID] = "SDMA0", 174 [SDMA1_HWID] = "SDMA1", 175 [SDMA2_HWID] = "SDMA2", 176 [SDMA3_HWID] = "SDMA3", 177 [LSDMA_HWID] = "LSDMA", 178 [ISP_HWID] = "ISP", 179 [DBGU_IO_HWID] = "DBGU_IO", 180 [DF_HWID] = "DF", 181 [CLKB_HWID] = "CLKB", 182 [FCH_HWID] = "FCH", 183 [DFX_DAP_HWID] = "DFX_DAP", 184 [L1IMU_PCIE_HWID] = "L1IMU_PCIE", 185 [L1IMU_NBIF_HWID] = "L1IMU_NBIF", 186 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR", 187 [L1IMU3_HWID] = "L1IMU3", 188 [L1IMU4_HWID] = "L1IMU4", 189 [L1IMU5_HWID] = "L1IMU5", 190 [L1IMU6_HWID] = "L1IMU6", 191 [L1IMU7_HWID] = "L1IMU7", 192 [L1IMU8_HWID] = "L1IMU8", 193 [L1IMU9_HWID] = "L1IMU9", 194 [L1IMU10_HWID] = "L1IMU10", 195 [L1IMU11_HWID] = "L1IMU11", 196 [L1IMU12_HWID] = "L1IMU12", 197 [L1IMU13_HWID] = "L1IMU13", 198 [L1IMU14_HWID] = "L1IMU14", 199 [L1IMU15_HWID] = "L1IMU15", 200 [WAFLC_HWID] = "WAFLC", 201 [FCH_USB_PD_HWID] = "FCH_USB_PD", 202 [PCIE_HWID] = "PCIE", 203 [PCS_HWID] = "PCS", 204 [DDCL_HWID] = "DDCL", 205 [SST_HWID] = "SST", 206 [IOAGR_HWID] = "IOAGR", 207 [NBIF_HWID] = "NBIF", 208 [IOAPIC_HWID] = "IOAPIC", 209 [SYSTEMHUB_HWID] = "SYSTEMHUB", 210 [NTBCCP_HWID] = "NTBCCP", 211 [UMC_HWID] = "UMC", 212 [SATA_HWID] = "SATA", 213 [USB_HWID] = "USB", 214 [CCXSEC_HWID] = "CCXSEC", 215 [XGMI_HWID] = "XGMI", 216 [XGBE_HWID] = "XGBE", 217 [MP0_HWID] = "MP0", 218 [VPE_HWID] = "VPE", 219 [ATU_HWID] = "ATU", 220 [AIGC_HWID] = "AIGC", 221 }; 222 223 static int hw_id_map[MAX_HWIP] = { 224 [GC_HWIP] = GC_HWID, 225 [HDP_HWIP] = HDP_HWID, 226 [SDMA0_HWIP] = SDMA0_HWID, 227 [SDMA1_HWIP] = SDMA1_HWID, 228 [SDMA2_HWIP] = SDMA2_HWID, 229 [SDMA3_HWIP] = SDMA3_HWID, 230 [LSDMA_HWIP] = LSDMA_HWID, 231 [MMHUB_HWIP] = MMHUB_HWID, 232 [ATHUB_HWIP] = ATHUB_HWID, 233 [NBIO_HWIP] = NBIF_HWID, 234 [MP0_HWIP] = MP0_HWID, 235 [MP1_HWIP] = MP1_HWID, 236 [UVD_HWIP] = 
UVD_HWID, 237 [VCE_HWIP] = VCE_HWID, 238 [DF_HWIP] = DF_HWID, 239 [DCE_HWIP] = DMU_HWID, 240 [OSSSYS_HWIP] = OSSSYS_HWID, 241 [SMUIO_HWIP] = SMUIO_HWID, 242 [PWR_HWIP] = PWR_HWID, 243 [NBIF_HWIP] = NBIF_HWID, 244 [THM_HWIP] = THM_HWID, 245 [CLK_HWIP] = CLKA_HWID, 246 [UMC_HWIP] = UMC_HWID, 247 [XGMI_HWIP] = XGMI_HWID, 248 [DCI_HWIP] = DCI_HWID, 249 [PCIE_HWIP] = PCIE_HWID, 250 [VPE_HWIP] = VPE_HWID, 251 [ISP_HWIP] = ISP_HWID, 252 [ATU_HWIP] = ATU_HWID, 253 }; 254 255 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary) 256 { 257 u64 tmr_offset, tmr_size, pos; 258 void *discv_regn; 259 int ret; 260 261 ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size); 262 if (ret) 263 return ret; 264 265 pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET; 266 267 /* This region is read-only and reserved from system use */ 268 discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC); 269 if (discv_regn) { 270 memcpy(binary, discv_regn, adev->discovery.size); 271 memunmap(discv_regn); 272 return 0; 273 } 274 275 return -ENOENT; 276 } 277 278 #define IP_DISCOVERY_V2 2 279 #define IP_DISCOVERY_V4 4 280 281 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, 282 uint8_t *binary) 283 { 284 bool sz_valid = true; 285 uint64_t vram_size; 286 int i, ret = 0; 287 u32 msg; 288 289 if (!amdgpu_sriov_vf(adev)) { 290 /* It can take up to two second for IFWI init to complete on some dGPUs, 291 * but generally it should be in the 60-100ms range. Normally this starts 292 * as soon as the device gets power so by the time the OS loads this has long 293 * completed. However, when a card is hotplugged via e.g., USB4, we need to 294 * wait for this to complete. Once the C2PMSG is updated, we can 295 * continue. 296 */ 297 298 for (i = 0; i < 2000; i++) { 299 msg = RREG32(mmMP0_SMN_C2PMSG_33); 300 if (msg & 0x80000000) 301 break; 302 msleep(1); 303 } 304 } 305 306 vram_size = RREG32(mmRCC_CONFIG_MEMSIZE); 307 if (!vram_size || vram_size == U32_MAX) 308 sz_valid = false; 309 else 310 vram_size <<= 20; 311 312 /* 313 * If in VRAM, discovery TMR is marked for reservation. If it is in system mem, 314 * then it is not required to be reserved. 315 */ 316 if (sz_valid) { 317 if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) { 318 /* For SRIOV VFs with dynamic critical region enabled, 319 * we will get the IPD binary via below call. 320 * If dynamic critical is disabled, fall through to normal seq. 
321 */ 322 if (amdgpu_virt_get_dynamic_data_info(adev, 323 AMD_SRIOV_MSG_IPD_TABLE_ID, binary, 324 &adev->discovery.size)) { 325 dev_err(adev->dev, 326 "failed to read discovery info from dynamic critical region."); 327 ret = -EINVAL; 328 goto exit; 329 } 330 } else { 331 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; 332 333 amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, 334 adev->discovery.size, false); 335 adev->discovery.reserve_tmr = true; 336 } 337 } else { 338 ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary); 339 } 340 341 if (ret) 342 dev_err(adev->dev, 343 "failed to read discovery info from memory, vram size read: %llx", 344 vram_size); 345 exit: 346 return ret; 347 } 348 349 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, 350 uint8_t *binary, 351 const char *fw_name) 352 { 353 const struct firmware *fw; 354 int r; 355 356 r = firmware_request_nowarn(&fw, fw_name, adev->dev); 357 if (r) { 358 if (amdgpu_discovery == 2) 359 dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name); 360 else 361 drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name); 362 return r; 363 } 364 365 memcpy((u8 *)binary, (u8 *)fw->data, fw->size); 366 release_firmware(fw); 367 368 return 0; 369 } 370 371 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size) 372 { 373 uint16_t checksum = 0; 374 int i; 375 376 for (i = 0; i < size; i++) 377 checksum += data[i]; 378 379 return checksum; 380 } 381 382 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size, 383 uint16_t expected) 384 { 385 return !!(amdgpu_discovery_calculate_checksum(data, size) == expected); 386 } 387 388 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary) 389 { 390 struct binary_header *bhdr; 391 bhdr = (struct binary_header *)binary; 392 393 return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE); 394 } 395 396 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) 397 { 398 /* 399 * So far, apply this quirk only on those Navy Flounder boards which 400 * have a bad harvest table of VCN config. 
401 */ 402 if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) && 403 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) { 404 switch (adev->pdev->revision) { 405 case 0xC1: 406 case 0xC2: 407 case 0xC3: 408 case 0xC5: 409 case 0xC7: 410 case 0xCF: 411 case 0xDF: 412 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 413 adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1; 414 break; 415 default: 416 break; 417 } 418 } 419 } 420 421 static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev, 422 struct binary_header *bhdr) 423 { 424 uint8_t *discovery_bin = adev->discovery.bin; 425 struct table_info *info; 426 uint16_t checksum; 427 uint16_t offset; 428 429 info = &bhdr->table_list[NPS_INFO]; 430 offset = le16_to_cpu(info->offset); 431 checksum = le16_to_cpu(info->checksum); 432 433 struct nps_info_header *nhdr = 434 (struct nps_info_header *)(discovery_bin + offset); 435 436 if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) { 437 dev_dbg(adev->dev, "invalid ip discovery nps info table id\n"); 438 return -EINVAL; 439 } 440 441 if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, 442 le32_to_cpu(nhdr->size_bytes), 443 checksum)) { 444 dev_dbg(adev->dev, "invalid nps info data table checksum\n"); 445 return -EINVAL; 446 } 447 448 return 0; 449 } 450 451 static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev) 452 { 453 if (amdgpu_discovery == 2) { 454 /* Assume there is valid discovery TMR in VRAM even if binary is sideloaded */ 455 adev->discovery.reserve_tmr = true; 456 return "amdgpu/ip_discovery.bin"; 457 } 458 459 switch (adev->asic_type) { 460 case CHIP_VEGA10: 461 return "amdgpu/vega10_ip_discovery.bin"; 462 case CHIP_VEGA12: 463 return "amdgpu/vega12_ip_discovery.bin"; 464 case CHIP_RAVEN: 465 if (adev->apu_flags & AMD_APU_IS_RAVEN2) 466 return "amdgpu/raven2_ip_discovery.bin"; 467 else if (adev->apu_flags & AMD_APU_IS_PICASSO) 468 return "amdgpu/picasso_ip_discovery.bin"; 469 else 470 return "amdgpu/raven_ip_discovery.bin"; 471 case CHIP_VEGA20: 472 return "amdgpu/vega20_ip_discovery.bin"; 473 case CHIP_ARCTURUS: 474 return "amdgpu/arcturus_ip_discovery.bin"; 475 case CHIP_ALDEBARAN: 476 return "amdgpu/aldebaran_ip_discovery.bin"; 477 default: 478 return NULL; 479 } 480 } 481 482 static int amdgpu_discovery_init(struct amdgpu_device *adev) 483 { 484 struct table_info *info; 485 struct binary_header *bhdr; 486 uint8_t *discovery_bin; 487 const char *fw_name; 488 uint16_t offset; 489 uint16_t size; 490 uint16_t checksum; 491 int r; 492 493 adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL); 494 if (!adev->discovery.bin) 495 return -ENOMEM; 496 adev->discovery.size = DISCOVERY_TMR_SIZE; 497 adev->discovery.debugfs_blob.data = adev->discovery.bin; 498 adev->discovery.debugfs_blob.size = adev->discovery.size; 499 500 discovery_bin = adev->discovery.bin; 501 /* Read from file if it is the preferred option */ 502 fw_name = amdgpu_discovery_get_fw_name(adev); 503 if (fw_name != NULL) { 504 drm_dbg(&adev->ddev, "use ip discovery information from file"); 505 r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin, 506 fw_name); 507 if (r) 508 goto out; 509 } else { 510 drm_dbg(&adev->ddev, "use ip discovery information from memory"); 511 r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin); 512 if (r) 513 goto out; 514 } 515 516 /* check the ip discovery binary signature */ 517 if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) { 518 dev_err(adev->dev, 519 "get invalid ip discovery binary 
signature\n"); 520 r = -EINVAL; 521 goto out; 522 } 523 524 bhdr = (struct binary_header *)discovery_bin; 525 526 offset = offsetof(struct binary_header, binary_checksum) + 527 sizeof(bhdr->binary_checksum); 528 size = le16_to_cpu(bhdr->binary_size) - offset; 529 checksum = le16_to_cpu(bhdr->binary_checksum); 530 531 if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size, 532 checksum)) { 533 dev_err(adev->dev, "invalid ip discovery binary checksum\n"); 534 r = -EINVAL; 535 goto out; 536 } 537 538 info = &bhdr->table_list[IP_DISCOVERY]; 539 offset = le16_to_cpu(info->offset); 540 checksum = le16_to_cpu(info->checksum); 541 542 if (offset) { 543 struct ip_discovery_header *ihdr = 544 (struct ip_discovery_header *)(discovery_bin + offset); 545 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { 546 dev_err(adev->dev, "invalid ip discovery data table signature\n"); 547 r = -EINVAL; 548 goto out; 549 } 550 551 if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, 552 le16_to_cpu(ihdr->size), 553 checksum)) { 554 dev_err(adev->dev, "invalid ip discovery data table checksum\n"); 555 r = -EINVAL; 556 goto out; 557 } 558 } 559 560 info = &bhdr->table_list[GC]; 561 offset = le16_to_cpu(info->offset); 562 checksum = le16_to_cpu(info->checksum); 563 564 if (offset) { 565 struct gpu_info_header *ghdr = 566 (struct gpu_info_header *)(discovery_bin + offset); 567 568 if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) { 569 dev_err(adev->dev, "invalid ip discovery gc table id\n"); 570 r = -EINVAL; 571 goto out; 572 } 573 574 if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, 575 le32_to_cpu(ghdr->size), 576 checksum)) { 577 dev_err(adev->dev, "invalid gc data table checksum\n"); 578 r = -EINVAL; 579 goto out; 580 } 581 } 582 583 info = &bhdr->table_list[HARVEST_INFO]; 584 offset = le16_to_cpu(info->offset); 585 checksum = le16_to_cpu(info->checksum); 586 587 if (offset) { 588 struct harvest_info_header *hhdr = 589 (struct harvest_info_header *)(discovery_bin + offset); 590 591 if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) { 592 dev_err(adev->dev, "invalid ip discovery harvest table signature\n"); 593 r = -EINVAL; 594 goto out; 595 } 596 597 if (!amdgpu_discovery_verify_checksum( 598 discovery_bin + offset, 599 sizeof(struct harvest_table), checksum)) { 600 dev_err(adev->dev, "invalid harvest data table checksum\n"); 601 r = -EINVAL; 602 goto out; 603 } 604 } 605 606 info = &bhdr->table_list[VCN_INFO]; 607 offset = le16_to_cpu(info->offset); 608 checksum = le16_to_cpu(info->checksum); 609 610 if (offset) { 611 struct vcn_info_header *vhdr = 612 (struct vcn_info_header *)(discovery_bin + offset); 613 614 if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) { 615 dev_err(adev->dev, "invalid ip discovery vcn table id\n"); 616 r = -EINVAL; 617 goto out; 618 } 619 620 if (!amdgpu_discovery_verify_checksum( 621 discovery_bin + offset, 622 le32_to_cpu(vhdr->size_bytes), checksum)) { 623 dev_err(adev->dev, "invalid vcn data table checksum\n"); 624 r = -EINVAL; 625 goto out; 626 } 627 } 628 629 info = &bhdr->table_list[MALL_INFO]; 630 offset = le16_to_cpu(info->offset); 631 checksum = le16_to_cpu(info->checksum); 632 633 if (0 && offset) { 634 struct mall_info_header *mhdr = 635 (struct mall_info_header *)(discovery_bin + offset); 636 637 if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) { 638 dev_err(adev->dev, "invalid ip discovery mall table id\n"); 639 r = -EINVAL; 640 goto out; 641 } 642 643 if (!amdgpu_discovery_verify_checksum( 644 discovery_bin 
+ offset, 645 le32_to_cpu(mhdr->size_bytes), checksum)) { 646 dev_err(adev->dev, "invalid mall data table checksum\n"); 647 r = -EINVAL; 648 goto out; 649 } 650 } 651 652 return 0; 653 654 out: 655 kfree(adev->discovery.bin); 656 adev->discovery.bin = NULL; 657 if ((amdgpu_discovery != 2) && 658 (RREG32(mmIP_DISCOVERY_VERSION) == 4)) 659 amdgpu_ras_query_boot_status(adev, 4); 660 return r; 661 } 662 663 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev); 664 665 void amdgpu_discovery_fini(struct amdgpu_device *adev) 666 { 667 amdgpu_discovery_sysfs_fini(adev); 668 kfree(adev->discovery.bin); 669 adev->discovery.bin = NULL; 670 } 671 672 static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev, 673 uint8_t instance, uint16_t hw_id) 674 { 675 if (instance >= HWIP_MAX_INSTANCE) { 676 dev_err(adev->dev, 677 "Unexpected instance_number (%d) from ip discovery blob\n", 678 instance); 679 return -EINVAL; 680 } 681 if (hw_id >= HW_ID_MAX) { 682 dev_err(adev->dev, 683 "Unexpected hw_id (%d) from ip discovery blob\n", 684 hw_id); 685 return -EINVAL; 686 } 687 688 return 0; 689 } 690 691 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, 692 uint32_t *vcn_harvest_count) 693 { 694 uint8_t *discovery_bin = adev->discovery.bin; 695 struct binary_header *bhdr; 696 struct ip_discovery_header *ihdr; 697 struct die_header *dhdr; 698 struct ip *ip; 699 uint16_t die_offset, ip_offset, num_dies, num_ips; 700 uint16_t hw_id; 701 uint8_t inst; 702 int i, j; 703 704 bhdr = (struct binary_header *)discovery_bin; 705 ihdr = (struct ip_discovery_header 706 *)(discovery_bin + 707 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 708 num_dies = le16_to_cpu(ihdr->num_dies); 709 710 /* scan harvest bit of all IP data structures */ 711 for (i = 0; i < num_dies; i++) { 712 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 713 dhdr = (struct die_header *)(discovery_bin + die_offset); 714 num_ips = le16_to_cpu(dhdr->num_ips); 715 ip_offset = die_offset + sizeof(*dhdr); 716 717 for (j = 0; j < num_ips; j++) { 718 ip = (struct ip *)(discovery_bin + ip_offset); 719 inst = ip->number_instance; 720 hw_id = le16_to_cpu(ip->hw_id); 721 if (amdgpu_discovery_validate_ip(adev, inst, hw_id)) 722 goto next_ip; 723 724 if (ip->harvest == 1) { 725 switch (hw_id) { 726 case VCN_HWID: 727 (*vcn_harvest_count)++; 728 if (inst == 0) { 729 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; 730 adev->vcn.inst_mask &= 731 ~AMDGPU_VCN_HARVEST_VCN0; 732 adev->jpeg.inst_mask &= 733 ~AMDGPU_VCN_HARVEST_VCN0; 734 } else { 735 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 736 adev->vcn.inst_mask &= 737 ~AMDGPU_VCN_HARVEST_VCN1; 738 adev->jpeg.inst_mask &= 739 ~AMDGPU_VCN_HARVEST_VCN1; 740 } 741 break; 742 case DMU_HWID: 743 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 744 break; 745 default: 746 break; 747 } 748 } 749 next_ip: 750 ip_offset += struct_size(ip, base_address, 751 ip->num_base_address); 752 } 753 } 754 } 755 756 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, 757 uint32_t *vcn_harvest_count, 758 uint32_t *umc_harvest_count) 759 { 760 uint8_t *discovery_bin = adev->discovery.bin; 761 struct binary_header *bhdr; 762 struct harvest_table *harvest_info; 763 u16 offset; 764 int i; 765 uint32_t umc_harvest_config = 0; 766 767 bhdr = (struct binary_header *)discovery_bin; 768 offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset); 769 770 if (!offset) { 771 dev_err(adev->dev, "invalid harvest table offset\n"); 772 return; 773 } 
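	/*
	 * Note: each harvest table entry below is a {hw_id, number_instance}
	 * pair; the list holds at most 32 entries and parsing stops at the
	 * first entry whose hw_id is 0. For example, an entry with
	 * hw_id == VCN_HWID and number_instance == 1 marks VCN1 (and its
	 * paired JPEG instance) as harvested.
	 */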
	harvest_info = (struct harvest_table *)(discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			adev->vcn.harvest_config |=
				(1 << harvest_info->list[i].number_instance);
			adev->jpeg.harvest_config |=
				(1 << harvest_info->list[i].number_instance);

			adev->vcn.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			adev->jpeg.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			umc_harvest_config |=
				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
			(*umc_harvest_count)++;
			break;
		case GC_HWID:
			adev->gfx.xcc_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case SDMA0_HWID:
			adev->sdma.sdma_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
#if defined(CONFIG_DRM_AMD_ISP)
		case ISP_HWID:
			adev->isp.harvest_config |=
				~(1U << harvest_info->list[i].number_instance);
			break;
#endif
		default:
			break;
		}
	}

	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
				~umc_harvest_config;
}

/* ================================================== */

struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8 num_instance;
	u8 major, minor, revision;
	u8 harvest;

	int num_base_addresses;
	u32 base_addr[] __counted_by(num_base_addresses);
};

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;  /* ip_discovery/die/#die/, contains ip_hw_id */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that, at + size <= PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static const struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static const struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static const struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static const struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static const struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	kfree(ip_top);
	adev->discovery.ip_top = NULL;
}

static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
{
	uint8_t harvest = 0;

	/* Until a uniform way is figured out, get mask based on hwid */
	switch (hw_id) {
	case VCN_HWID:
		/* VCN vs UVD+VCE */
		if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
			harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
		break;
	case DMU_HWID:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
			harvest = 0x1;
		break;
	case UMC_HWID:
		/* TODO: It needs another parsing; for now, ignore. */
		break;
	case GC_HWID:
		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
		break;
	case SDMA0_HWID:
		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
		break;
	default:
		break;
	}

	return harvest;
}

static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	int ii, jj, kk, res;
	uint16_t hw_id;
	uint8_t inst;

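	/*
	 * Note: the discovery blob is not assumed to be sorted by HW ID, so
	 * the IP list is rescanned once per known HW ID (an
	 * O(HW_ID_MAX * num_ips) walk); this keeps all instances of one
	 * HW ID grouped under a single #die/#hw_id/ kset.
	 */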
	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip_v4 *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip_v4 *)(discovery_bin + ip_offset);
			inst = ip->instance_number;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
			    hw_id != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->instance_number;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest =
				amdgpu_discovery_get_harvest_info(
					adev, ip_hw_instance->hw_id,
					ip_hw_instance->num_instance);
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
				if (reg_base_64)
					ip_hw_instance->base_addr[kk] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
				else
					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
			}

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			if (reg_base_64)
				ip_offset += struct_size(ip, base_address_64,
							 ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address,
							 ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct ip_discovery_top *ip_top = adev->discovery.ip_top;
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header
			*)(discovery_bin +
			   le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
	}

	return 0;
}

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct ip_discovery_top *ip_top;
	struct kset *die_kset;
	int res, ii;

	if (!discovery_bin)
		return -EINVAL;

	ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL);
	if (!ip_top)
		return -ENOMEM;

	ip_top->adev = adev;
	adev->discovery.ip_top = ip_top;
	res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
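	/*
	 * The kset spinlock is dropped around each child teardown below
	 * since the final kobject_put() may sleep while removing sysfs
	 * state; _safe iteration with list_del_init() keeps the walk
	 * consistent across the unlock/relock.
	 */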
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct ip_discovery_top *ip_top = adev->discovery.ip_top;
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&ip_top->die_kset.kobj);
	kobject_put(&ip_top->kobj);
}

/* ================================================== */

static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	uint8_t num_base_address, subrev, variant;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	uint8_t *discovery_bin;
	struct ip_v4 *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint32_t wafl_ver;
	uint16_t num_ips;
	uint16_t hw_id;
	uint8_t inst;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r)
		return r;
	discovery_bin = adev->discovery.bin;
	wafl_ver = 0;
	adev->gfx.xcc_mask = 0;
	adev->sdma.sdma_mask = 0;
	adev->vcn.inst_mask = 0;
	adev->jpeg.inst_mask = 0;
	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header
			*)(discovery_bin +
			   le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
				  le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
			  le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(discovery_bin + ip_offset);

			inst = ip->instance_number;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->instance_number,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				if (adev->vcn.num_vcn_inst <
				    AMDGPU_MAX_VCN_INSTANCES) {
					adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
						ip->revision & 0xc0;
					adev->vcn.num_vcn_inst++;
					adev->vcn.inst_mask |=
						(1U << ip->instance_number);
					adev->jpeg.inst_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
				}
				ip->revision &= ~0xc0;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances <
				    AMDGPU_MAX_SDMA_INSTANCES) {
					adev->sdma.num_instances++;
					adev->sdma.sdma_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
				}
			}

			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
					adev->vpe.num_instances++;
				else
					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
						adev->vpe.num_instances + 1,
						AMDGPU_MAX_VPE_INSTANCES);
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
				adev->gmc.num_umc++;
				adev->umc.node_inst_num++;
			}

			if (le16_to_cpu(ip->hw_id) == GC_HWID)
				adev->gfx.xcc_mask |=
					(1U << ip->instance_number);

			if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
				wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
							   ip->revision, 0, 0);

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				if (ihdr->base_addr_64_bit)
					/* Truncate the 64bit base address from ip discovery
					 * and only store lower 32bit ip base in reg_offset[].
					 * Bits > 32 follows ASIC specific format, thus just
					 * discard them and handle it within specific ASIC.
					 * By this way reg_offset[] and related helpers can
					 * stay unchanged.
					 * The base address is in dwords, thus clear the
					 * highest 2 bits to store.
					 */
					ip->base_address[k] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
				else
					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
				    hw_id_map[hw_ip] != 0) {
					DRM_DEBUG("set register base offset for %s\n",
						  hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->instance_number] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example. Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
					 * example. On most chips there are multiple instances
					 * with the same HWID.
					 */

					if (ihdr->version < 3) {
						subrev = 0;
						variant = 0;
					} else {
						subrev = ip->sub_revision;
						variant = ip->variant;
					}

					adev->ip_versions[hw_ip]
							 [ip->instance_number] =
						IP_VERSION_FULL(ip->major,
								ip->minor,
								ip->revision,
								variant,
								subrev);
				}
			}

next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
		adev->ip_versions[XGMI_HWIP][0] = wafl_ver;

	return 0;
}

static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct ip_discovery_header *ihdr;
	struct binary_header *bhdr;
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;
	uint16_t offset, ihdr_ver;

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
	ihdr = (struct ip_discovery_header *)(discovery_bin + offset);
	ihdr_ver = le16_to_cpu(ihdr->version);
	/*
	 * Harvest table does not fit Navi1x and legacy GPUs,
	 * so read harvest bit per IP data structure to set
	 * harvest configuration.
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
	    ihdr_ver <= 2) {
		if ((adev->pdev->device == 0x731E &&
		     (adev->pdev->revision == 0xC6 ||
		      adev->pdev->revision == 0xC7)) ||
		    (adev->pdev->device == 0x7340 &&
		     adev->pdev->revision == 0xC9) ||
		    (adev->pdev->device == 0x7360 &&
		     adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
								 &vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc) {
		adev->gmc.num_umc -= umc_harvest_count;
	}
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v1_3 v1_3;
	struct gc_info_v2_0 v2;
	struct gc_info_v2_1 v2_1;
};

static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
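		/* For v1 tables the GL2C count serves as the texture channel
		 * cache count below.
		 */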
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
			adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
			adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
			adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
			adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
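		/* v2 tables (gfx9 era) report the TCC count directly. */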
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union mall_info {
	struct mall_info_v1_0 v1;
	struct mall_info_v2_0 v2;
};

static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		adev->gmc.m_half_use = half_use;
		break;
	case 2:
		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
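		/* v2 reports a single per-UMC MALL size, so the total is that
		 * size scaled by the UMC count; e.g. 16 UMC instances at
		 * 2 MiB each would yield a 32 MiB MALL (illustrative numbers
		 * only, not tied to any specific ASIC).
		 */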
adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc; 1783 break; 1784 default: 1785 dev_err(adev->dev, 1786 "Unhandled MALL info table %d.%d\n", 1787 le16_to_cpu(mall_info->v1.header.version_major), 1788 le16_to_cpu(mall_info->v1.header.version_minor)); 1789 return -EINVAL; 1790 } 1791 return 0; 1792 } 1793 1794 union vcn_info { 1795 struct vcn_info_v1_0 v1; 1796 }; 1797 1798 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) 1799 { 1800 uint8_t *discovery_bin = adev->discovery.bin; 1801 struct binary_header *bhdr; 1802 union vcn_info *vcn_info; 1803 u16 offset; 1804 int v; 1805 1806 if (!discovery_bin) { 1807 DRM_ERROR("ip discovery uninitialized\n"); 1808 return -EINVAL; 1809 } 1810 1811 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES 1812 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES 1813 * but that may change in the future with new GPUs so keep this 1814 * check for defensive purposes. 1815 */ 1816 if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) { 1817 dev_err(adev->dev, "invalid vcn instances\n"); 1818 return -EINVAL; 1819 } 1820 1821 bhdr = (struct binary_header *)discovery_bin; 1822 offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset); 1823 1824 if (!offset) 1825 return 0; 1826 1827 vcn_info = (union vcn_info *)(discovery_bin + offset); 1828 1829 switch (le16_to_cpu(vcn_info->v1.header.version_major)) { 1830 case 1: 1831 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES 1832 * so this won't overflow. 1833 */ 1834 for (v = 0; v < adev->vcn.num_vcn_inst; v++) { 1835 adev->vcn.inst[v].vcn_codec_disable_mask = 1836 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits); 1837 } 1838 break; 1839 default: 1840 dev_err(adev->dev, 1841 "Unhandled VCN info table %d.%d\n", 1842 le16_to_cpu(vcn_info->v1.header.version_major), 1843 le16_to_cpu(vcn_info->v1.header.version_minor)); 1844 return -EINVAL; 1845 } 1846 return 0; 1847 } 1848 1849 union nps_info { 1850 struct nps_info_v1_0 v1; 1851 }; 1852 1853 static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev, 1854 union nps_info *nps_data) 1855 { 1856 uint64_t vram_size, pos, offset; 1857 struct nps_info_header *nhdr; 1858 struct binary_header bhdr; 1859 uint16_t checksum; 1860 1861 vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; 1862 pos = vram_size - DISCOVERY_TMR_OFFSET; 1863 amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false); 1864 1865 offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset); 1866 checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum); 1867 1868 amdgpu_device_vram_access(adev, (pos + offset), nps_data, 1869 sizeof(*nps_data), false); 1870 1871 nhdr = (struct nps_info_header *)(nps_data); 1872 if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data, 1873 le32_to_cpu(nhdr->size_bytes), 1874 checksum)) { 1875 dev_err(adev->dev, "nps data refresh, checksum mismatch\n"); 1876 return -EINVAL; 1877 } 1878 1879 return 0; 1880 } 1881 1882 int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, 1883 uint32_t *nps_type, 1884 struct amdgpu_gmc_memrange **ranges, 1885 int *range_cnt, bool refresh) 1886 { 1887 uint8_t *discovery_bin = adev->discovery.bin; 1888 struct amdgpu_gmc_memrange *mem_ranges; 1889 struct binary_header *bhdr; 1890 union nps_info *nps_info; 1891 union nps_info nps_data; 1892 u16 offset; 1893 int i, r; 1894 1895 if (!nps_type || !range_cnt || !ranges) 1896 return -EINVAL; 1897 1898 if (refresh) { 1899 r = amdgpu_discovery_refresh_nps_info(adev, &nps_data); 1900 if (r) 
union nps_info {
	struct nps_info_v1_0 v1;
};

static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
					     union nps_info *nps_data)
{
	uint64_t vram_size, pos, offset;
	struct nps_info_header *nhdr;
	struct binary_header bhdr;
	uint16_t checksum;

	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	pos = vram_size - DISCOVERY_TMR_OFFSET;
	amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);

	offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
	checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);

	amdgpu_device_vram_access(adev, (pos + offset), nps_data,
				  sizeof(*nps_data), false);

	nhdr = (struct nps_info_header *)(nps_data);
	if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
					      le32_to_cpu(nhdr->size_bytes),
					      checksum)) {
		dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
		return -EINVAL;
	}

	return 0;
}
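
/* On success, *ranges points to a kvcalloc'd array of *range_cnt entries;
 * ownership passes to the caller, which is expected to release it with
 * kvfree(). With refresh set, the table is pulled from VRAM even when a
 * cached discovery binary is available.
 */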
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
				  uint32_t *nps_type,
				  struct amdgpu_gmc_memrange **ranges,
				  int *range_cnt, bool refresh)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct amdgpu_gmc_memrange *mem_ranges;
	struct binary_header *bhdr;
	union nps_info *nps_info;
	union nps_info nps_data;
	u16 offset;
	int i, r;

	if (!nps_type || !range_cnt || !ranges)
		return -EINVAL;

	if (refresh) {
		r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
		if (r)
			return r;
		nps_info = &nps_data;
	} else {
		if (!discovery_bin) {
			dev_err(adev->dev,
				"fetch mem range failed, ip discovery uninitialized\n");
			return -EINVAL;
		}

		bhdr = (struct binary_header *)discovery_bin;
		offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);

		if (!offset)
			return -ENOENT;

		/* If verification fails, return as if NPS table doesn't exist */
		if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
			return -ENOENT;

		nps_info = (union nps_info *)(discovery_bin + offset);
	}

	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
	case 1:
		mem_ranges = kvcalloc(nps_info->v1.count,
				      sizeof(*mem_ranges),
				      GFP_KERNEL);
		if (!mem_ranges)
			return -ENOMEM;
		*nps_type = nps_info->v1.nps_type;
		*range_cnt = nps_info->v1.count;
		for (i = 0; i < *range_cnt; i++) {
			mem_ranges[i].base_address =
				nps_info->v1.instance_info[i].base_address;
			mem_ranges[i].limit_address =
				nps_info->v1.instance_info[i].limit_address;
			mem_ranges[i].nid_mask = -1;
			mem_ranges[i].flags = 0;
		}
		*ranges = mem_ranges;
		break;
	default:
		dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
			le16_to_cpu(nps_info->v1.header.version_major),
			le16_to_cpu(nps_info->v1.header.version_minor));
		return -EINVAL;
	}

	return 0;
}
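
/* The amdgpu_discovery_set_*_ip_blocks() helpers below translate the
 * discovered hardware IP versions into the driver IP blocks that
 * implement them. Each switch keys off the relevant HWIP entry; an
 * unhandled version is a hard error for mandatory blocks and a no-op
 * for optional ones.
 */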
static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
	/* what IP to use for this? */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
		break;
	case IP_VERSION(12, 1, 0):
		amdgpu_device_ip_block_add(adev, &soc_v1_0_common_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add common ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
	/* use GC or MMHUB IP version */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
	case IP_VERSION(12, 1, 0):
		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
		break;
	default:
		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 3, 0):
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		break;
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
		break;
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 1, 0):
		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(13, 0, 12):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	case IP_VERSION(13, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
	case IP_VERSION(14, 0, 5):
		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
		break;
	case IP_VERSION(15, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v15_0_ip_block);
		break;
	case IP_VERSION(15, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v15_0_8_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP0_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}
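
/* MP1 11.0.2 is shared by Vega20 and Arcturus: Arcturus is handled by
 * swSMU (smu_v11_0_ip_block) while the older ASICs stay on the legacy
 * powerplay path (pp_smu_ip_block), hence the asic_type check in the
 * first case block below.
 */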
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(13, 0, 12):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
	case IP_VERSION(14, 0, 4):
	case IP_VERSION(14, 0, 5):
		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP1_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

#if defined(CONFIG_DRM_AMD_DC)
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);
	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
#endif

static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
		case IP_VERSION(3, 5, 0):
		case IP_VERSION(3, 5, 1):
		case IP_VERSION(3, 6, 0):
		case IP_VERSION(4, 1, 0):
			/* TODO: Fix IP version. DC code expects version 4.0.1 */
			if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
				adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);

			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, DCE_HWIP, 0));
			return -EINVAL;
		}
	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, DCI_HWIP, 0));
			return -EINVAL;
		}
	}
#endif
	return 0;
}
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
		break;
	case IP_VERSION(12, 1, 0):
		amdgpu_device_ip_block_add(adev, &gfx_v12_1_ip_block);
		break;
	default:
		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
	case IP_VERSION(4, 4, 4):
		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
	case IP_VERSION(6, 1, 2):
	case IP_VERSION(6, 1, 3):
	case IP_VERSION(6, 1, 4):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
		amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
		break;
	case IP_VERSION(7, 1, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v7_1_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
		return -EINVAL;
	}

	return 0;
}

static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 12):
	case IP_VERSION(13, 0, 14):
		amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block);
		break;
	default:
		break;
	}
	return 0;
}
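
/* Multimedia block selection: ASICs that expose a VCE IP use the legacy
 * UVD+VCE pair; everything else uses the unified VCN and, where present,
 * a separate JPEG block. On some parts JPEG is skipped under SR-IOV, and
 * a few VCN revisions have no JPEG block at all.
 */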
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, UVD_HWIP, 0));
			return -EINVAL;
		}
		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, VCE_HWIP, 0));
			return -EINVAL;
		}
	} else {
		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 2):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		case IP_VERSION(4, 0, 3):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
			break;
		case IP_VERSION(4, 0, 5):
		case IP_VERSION(4, 0, 6):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
			break;
		case IP_VERSION(5, 0, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
			break;
		case IP_VERSION(5, 3, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v5_3_0_ip_block);
			break;
		case IP_VERSION(5, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, UVD_HWIP, 0));
			return -EINVAL;
		}
	}
	return 0;
}
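
/* MES (MicroEngine Scheduler) is only present on GC 11+. Enabling it
 * moves queue management from kernel-managed rings to firmware
 * scheduling; on GC 12 the unified MES is additionally gated behind the
 * amdgpu_uni_mes module parameter.
 */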
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		if (amdgpu_uni_mes)
			adev->enable_uni_mes = true;
		break;
	case IP_VERSION(12, 1, 0):
		amdgpu_device_ip_block_add(adev, &mes_v12_1_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		if (amdgpu_uni_mes)
			adev->enable_uni_mes = true;
		break;
	default:
		break;
	}
	return 0;
}

static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		aqua_vanjaram_init_soc_config(adev);
		break;
	case IP_VERSION(12, 1, 0):
		soc_v1_0_init_soc_config(adev);
		break;
	default:
		break;
	}
}

static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
	case IP_VERSION(6, 1, 3):
		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
		break;
	default:
		break;
	}

	return 0;
}

static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(4, 0, 5):
	case IP_VERSION(4, 0, 6):
		if (amdgpu_umsch_mm & 0x1) {
			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
			adev->enable_umsch_mm = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
{
#if defined(CONFIG_DRM_AMD_ISP)
	switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
	case IP_VERSION(4, 1, 0):
		amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
		break;
	case IP_VERSION(4, 1, 1):
		amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
		break;
	default:
		break;
	}
#endif

	return 0;
}
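
/* Entry point. ASICs that predate IP discovery (Vega10 through
 * Aldebaran, plus early Cyan Skillfish) get their IP versions and
 * instance counts hardcoded below so the rest of the driver can rely on
 * amdgpu_ip_version() uniformly; everything newer is parsed out of the
 * discovery binary.
 */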
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->sdma.sdma_mask = 3;
		adev->gmc.num_umc = 4;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->sdma.sdma_mask = 3;
		adev->gmc.num_umc = 4;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->sdma.sdma_mask = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		adev->gfx.xcc_mask = 1;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->sdma.sdma_mask = 3;
		adev->gmc.num_umc = 8;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->sdma.sdma_mask = 0xff;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->sdma.sdma_mask = 0x1f;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	case CHIP_CYAN_SKILLFISH:
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
			r = amdgpu_discovery_reg_base_init(adev);
			if (r)
				return -EINVAL;

			amdgpu_discovery_harvest_ip(adev);
			amdgpu_discovery_get_gfx_info(adev);
			amdgpu_discovery_get_mall_info(adev);
			amdgpu_discovery_get_vcn_info(adev);
		} else {
			cyan_skillfish_reg_base_init(adev);
			adev->sdma.num_instances = 2;
			adev->sdma.sdma_mask = 3;
			adev->gfx.xcc_mask = 1;
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
			adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
		}
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			drm_err(&adev->ddev, "discovery failed: %d\n", r);
			return r;
		}

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

	amdgpu_discovery_init_soc_config(adev);
	amdgpu_discovery_sysfs_init(adev);
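
	/* Derive the logical chip family from the GC version for code that
	 * still keys off adev->family rather than IP versions.
	 */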
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		adev->family = AMDGPU_FAMILY_GC_11_5_0;
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
	case IP_VERSION(12, 1, 0):
		adev->family = AMDGPU_FAMILY_GC_12_0_0;
		break;
	default:
		return -EINVAL;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}
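
	/* Wire up version-specific callbacks for the NBIO, HDP, DF, SMUIO
	 * and LSDMA helpers. Unlike IP blocks, these are plain function
	 * tables consumed throughout the driver, including before the
	 * regular IP block init sequence runs.
	 */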
	/* set NBIO version */
	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 9, 0):
	case IP_VERSION(7, 9, 1):
		adev->nbio.funcs = &nbio_v7_9_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
		break;
	case IP_VERSION(7, 11, 0):
	case IP_VERSION(7, 11, 1):
	case IP_VERSION(7, 11, 2):
	case IP_VERSION(7, 11, 3):
		adev->nbio.funcs = &nbio_v7_11_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	case IP_VERSION(6, 3, 1):
	case IP_VERSION(7, 11, 4):
		adev->nbio.funcs = &nbif_v6_3_1_funcs;
		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	case IP_VERSION(7, 0, 0):
		adev->hdp.funcs = &hdp_v7_0_funcs;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	case IP_VERSION(4, 3, 0):
		adev->df.funcs = &df_v4_3_funcs;
		break;
	case IP_VERSION(4, 6, 2):
		adev->df.funcs = &df_v4_6_2_funcs;
		break;
	case IP_VERSION(4, 15, 0):
	case IP_VERSION(4, 15, 1):
		adev->df.funcs = &df_v4_15_funcs;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 11):
		adev->smuio.funcs = &smuio_v13_0_3_funcs;
		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
			adev->flags |= AMD_IS_APU;
		}
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	case IP_VERSION(14, 0, 2):
		adev->smuio.funcs = &smuio_v14_0_2_funcs;
		break;
	case IP_VERSION(15, 0, 8):
		adev->smuio.funcs = &smuio_v15_0_8_funcs;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
		adev->lsdma.funcs = &lsdma_v7_0_funcs;
		break;
	default:
		break;
	}

	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_ras_ip_blocks(adev);
	if (r)
		return r;
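
	/* With PSP-backed firmware loading the SMU block was added above;
	 * for direct loading on bare metal or RLC backdoor loading, add it
	 * here instead, and only when DPM is enabled (amdgpu_dpm == 1).
	 */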
	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	     !amdgpu_sriov_vf(adev) &&
	     amdgpu_dpm == 1) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO &&
	     amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_isp_ip_blocks(adev);
	if (r)
		return r;
	return 0;
}