/*
 * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
#include "amdgpu_ras.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_3.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "df_v4_6_2.h"
#include "df_v4_15.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "nbio_v7_9.h"
#include "nbio_v7_11.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gmc_v12_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "nbif_v6_3_1.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "hdp_v7_0.h"
#include "nv.h"
#include "soc21.h"
#include "soc24.h"
#include "soc_v1_0.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
#include "ih_v7_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "gfx_v12_0.h"
#include "gfx_v12_1.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "sdma_v7_0.h"
#include "sdma_v7_1.h"
#include "lsdma_v6_0.h"
#include "lsdma_v7_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "vcn_v4_0_3.h"
#include "jpeg_v4_0_3.h"
#include "vcn_v4_0_5.h"
#include "jpeg_v4_0_5.h"
#include "amdgpu_vkms.h"
#include "mes_v11_0.h"
#include "mes_v12_0.h"
#include "mes_v12_1.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
#include "smuio_v14_0_2.h"
#include "smuio_v15_0_0.h"
#include "smuio_v15_0_8.h"
#include "vcn_v5_0_0.h"
#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
#include "jpeg_v5_0_1.h"
#include "jpeg_v5_3_0.h"

#include "amdgpu_ras_mgr.h"

#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif

MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");

#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMP0_SMN_C2PMSG_33 0x16061
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1

static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID] = "MP1",
	[MP2_HWID] = "MP2",
	[THM_HWID] = "THM",
	[SMUIO_HWID] = "SMUIO",
	[FUSE_HWID] = "FUSE",
	[CLKA_HWID] = "CLKA",
	[PWR_HWID] = "PWR",
	[GC_HWID] = "GC",
	[UVD_HWID] = "UVD",
	[AUDIO_AZ_HWID] = "AUDIO_AZ",
	[ACP_HWID] = "ACP",
	[DCI_HWID] = "DCI",
	[DMU_HWID] = "DMU",
	[DCO_HWID] = "DCO",
	[DIO_HWID] = "DIO",
	[XDMA_HWID] = "XDMA",
	[DCEAZ_HWID] = "DCEAZ",
	[DAZ_HWID] = "DAZ",
	[SDPMUX_HWID] = "SDPMUX",
	[NTB_HWID] = "NTB",
	[IOHC_HWID] = "IOHC",
	[L2IMU_HWID] = "L2IMU",
	[VCE_HWID] = "VCE",
	[MMHUB_HWID] = "MMHUB",
	[ATHUB_HWID] = "ATHUB",
	[DBGU_NBIO_HWID] = "DBGU_NBIO",
	[DFX_HWID] = "DFX",
	[DBGU0_HWID] = "DBGU0",
	[DBGU1_HWID] = "DBGU1",
	[OSSSYS_HWID] = "OSSSYS",
	[HDP_HWID] = "HDP",
	[SDMA0_HWID] = "SDMA0",
	[SDMA1_HWID] = "SDMA1",
	[SDMA2_HWID] = "SDMA2",
	[SDMA3_HWID] = "SDMA3",
	[LSDMA_HWID] = "LSDMA",
	[ISP_HWID] = "ISP",
	[DBGU_IO_HWID] = "DBGU_IO",
	[DF_HWID] = "DF",
	[CLKB_HWID] = "CLKB",
	[FCH_HWID] = "FCH",
	[DFX_DAP_HWID] = "DFX_DAP",
	[L1IMU_PCIE_HWID] = "L1IMU_PCIE",
	[L1IMU_NBIF_HWID] = "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
	[L1IMU3_HWID] = "L1IMU3",
	[L1IMU4_HWID] = "L1IMU4",
	[L1IMU5_HWID] = "L1IMU5",
	[L1IMU6_HWID] = "L1IMU6",
	[L1IMU7_HWID] = "L1IMU7",
	[L1IMU8_HWID] = "L1IMU8",
	[L1IMU9_HWID] = "L1IMU9",
	[L1IMU10_HWID] = "L1IMU10",
	[L1IMU11_HWID] = "L1IMU11",
	[L1IMU12_HWID] = "L1IMU12",
	[L1IMU13_HWID] = "L1IMU13",
	[L1IMU14_HWID] = "L1IMU14",
	[L1IMU15_HWID] = "L1IMU15",
	[WAFLC_HWID] = "WAFLC",
	[FCH_USB_PD_HWID] = "FCH_USB_PD",
	[PCIE_HWID] = "PCIE",
	[PCS_HWID] = "PCS",
	[DDCL_HWID] = "DDCL",
	[SST_HWID] = "SST",
	[IOAGR_HWID] = "IOAGR",
	[NBIF_HWID] = "NBIF",
	[IOAPIC_HWID] = "IOAPIC",
	[SYSTEMHUB_HWID] = "SYSTEMHUB",
	[NTBCCP_HWID] = "NTBCCP",
	[UMC_HWID] = "UMC",
	[SATA_HWID] = "SATA",
	[USB_HWID] = "USB",
	[CCXSEC_HWID] = "CCXSEC",
	[XGMI_HWID] = "XGMI",
	[XGBE_HWID] = "XGBE",
	[MP0_HWID] = "MP0",
	[VPE_HWID] = "VPE",
	[ATU_HWID] = "ATU",
	[AIGC_HWID] = "AIGC",
};

static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP] = GC_HWID,
	[HDP_HWIP] = HDP_HWID,
	[SDMA0_HWIP] = SDMA0_HWID,
	[SDMA1_HWIP] = SDMA1_HWID,
	[SDMA2_HWIP] = SDMA2_HWID,
	[SDMA3_HWIP] = SDMA3_HWID,
	[LSDMA_HWIP] = LSDMA_HWID,
	[MMHUB_HWIP] = MMHUB_HWID,
	[ATHUB_HWIP] = ATHUB_HWID,
	[NBIO_HWIP] = NBIF_HWID,
	[MP0_HWIP] = MP0_HWID,
	[MP1_HWIP] = MP1_HWID,
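
/* When the discovery table lives in system memory instead of VRAM (its TMR
 * location is reported via ACPI), the binary sits DISCOVERY_TMR_OFFSET bytes
 * before the end of the TMR.  The region is reserved from general use, so a
 * plain memremap() + memcpy() is sufficient to read it out.
 */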
	[UVD_HWIP] = UVD_HWID,
	[VCE_HWIP] = VCE_HWID,
	[DF_HWIP] = DF_HWID,
	[DCE_HWIP] = DMU_HWID,
	[OSSSYS_HWIP] = OSSSYS_HWID,
	[SMUIO_HWIP] = SMUIO_HWID,
	[PWR_HWIP] = PWR_HWID,
	[NBIF_HWIP] = NBIF_HWID,
	[THM_HWIP] = THM_HWID,
	[CLK_HWIP] = CLKA_HWID,
	[UMC_HWIP] = UMC_HWID,
	[XGMI_HWIP] = XGMI_HWID,
	[DCI_HWIP] = DCI_HWID,
	[PCIE_HWIP] = PCIE_HWID,
	[VPE_HWIP] = VPE_HWID,
	[ISP_HWIP] = ISP_HWID,
	[ATU_HWIP] = ATU_HWID,
};
static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
{
	u64 tmr_offset, tmr_size, pos;
	void *discv_regn;
	int ret;

	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
	if (ret)
		return ret;

	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;

	/* This region is read-only and reserved from system use */
	discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC);
	if (discv_regn) {
		memcpy(binary, discv_regn, adev->discovery.size);
		memunmap(discv_regn);
		return 0;
	}

	return -ENOENT;
}

#define IP_DISCOVERY_V2 2
#define IP_DISCOVERY_V4 4

static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
{
	bool sz_valid = true;
	uint64_t vram_size;
	int i, ret = 0;
	u32 msg;

	if (!amdgpu_sriov_vf(adev)) {
		/* It can take up to two seconds for IFWI init to complete on some dGPUs,
		 * but generally it should be in the 60-100ms range. Normally this starts
		 * as soon as the device gets power so by the time the OS loads this has long
		 * completed. However, when a card is hotplugged via e.g., USB4, we need to
		 * wait for this to complete. Once the C2PMSG is updated, we can
		 * continue.
		 */

		for (i = 0; i < 2000; i++) {
			msg = RREG32(mmMP0_SMN_C2PMSG_33);
			if (msg & 0x80000000)
				break;
			msleep(1);
		}
	}

	vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
	if (!vram_size || vram_size == U32_MAX)
		sz_valid = false;
	else
		vram_size <<= 20;

	/*
	 * If in VRAM, discovery TMR is marked for reservation. If it is in system mem,
	 * then it is not required to be reserved.
	 */
	if (sz_valid) {
		if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
			/* For SRIOV VFs with dynamic critical region enabled,
			 * we will get the IPD binary via below call.
			 * If dynamic critical is disabled, fall through to normal seq.
			 */
			if (amdgpu_virt_get_dynamic_data_info(adev,
					AMD_SRIOV_MSG_IPD_TABLE_ID, binary,
					&adev->discovery.size)) {
				dev_err(adev->dev,
					"failed to read discovery info from dynamic critical region.");
				ret = -EINVAL;
				goto exit;
			}
		} else {
			uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

			amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
						  adev->discovery.size, false);
			adev->discovery.reserve_tmr = true;
		}
	} else {
		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
	}

	if (ret)
		dev_err(adev->dev,
			"failed to read discovery info from memory, vram size read: %llx",
			vram_size);
exit:
	return ret;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
						  uint8_t *binary,
						  const char *fw_name)
{
	const struct firmware *fw;
	int r;

	r = firmware_request_nowarn(&fw, fw_name, adev->dev);
	if (r) {
		if (amdgpu_discovery == 2)
			dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
		else
			drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
	release_firmware(fw);

	return 0;
}
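
/* Every table in the discovery binary is covered by a simple 16-bit additive
 * checksum: the table's bytes are summed and the low 16 bits are compared
 * against the checksum recorded in the binary header's table list.
 */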
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;
	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table of VCN config.
	 */
	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
					   struct binary_header *bhdr)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct table_info *info;
	uint16_t checksum;
	uint16_t offset;

	info = &bhdr->table_list[NPS_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	struct nps_info_header *nhdr =
		(struct nps_info_header *)(discovery_bin + offset);

	if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
		dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
		return -EINVAL;
	}

	if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
					      le32_to_cpu(nhdr->size_bytes),
					      checksum)) {
		dev_dbg(adev->dev, "invalid nps info data table checksum\n");
		return -EINVAL;
	}

	return 0;
}

static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
{
	if (amdgpu_discovery == 2) {
		/* Assume there is valid discovery TMR in VRAM even if binary is sideloaded */
		adev->discovery.reserve_tmr = true;
		return "amdgpu/ip_discovery.bin";
	}

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		return "amdgpu/vega10_ip_discovery.bin";
	case CHIP_VEGA12:
		return "amdgpu/vega12_ip_discovery.bin";
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			return "amdgpu/raven2_ip_discovery.bin";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			return "amdgpu/picasso_ip_discovery.bin";
		else
			return "amdgpu/raven_ip_discovery.bin";
	case CHIP_VEGA20:
		return "amdgpu/vega20_ip_discovery.bin";
	case CHIP_ARCTURUS:
		return "amdgpu/arcturus_ip_discovery.bin";
	case CHIP_ALDEBARAN:
		return "amdgpu/aldebaran_ip_discovery.bin";
	default:
		return NULL;
	}
}
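
/* Fetch the discovery binary (sideloaded file or device memory) and validate
 * it: binary signature first, then the checksum over the binary as a whole,
 * then the signature/table-id and checksum of each table (IP discovery, GC,
 * harvest, VCN, MALL) that is present.
 */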
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint8_t *discovery_bin;
	const char *fw_name;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
	if (!adev->discovery.bin)
		return -ENOMEM;
	adev->discovery.size = DISCOVERY_TMR_SIZE;
	adev->discovery.debugfs_blob.data = adev->discovery.bin;
	adev->discovery.debugfs_blob.size = adev->discovery.size;

	discovery_bin = adev->discovery.bin;
	/* Read from file if it is the preferred option */
	fw_name = amdgpu_discovery_get_fw_name(adev);
	if (fw_name != NULL) {
		drm_dbg(&adev->ddev, "use ip discovery information from file");
		r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin,
							   fw_name);
		if (r)
			goto out;
	} else {
		drm_dbg(&adev->ddev, "use ip discovery information from memory");
		r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin);
		if (r)
			goto out;
	}

	/* check the ip discovery binary signature */
	if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) {
		dev_err(adev->dev,
			"get invalid ip discovery binary signature\n");
		r = -EINVAL;
		goto out;
	}

	bhdr = (struct binary_header *)discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size,
					      checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(discovery_bin + offset);
		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
						      le16_to_cpu(ihdr->size),
						      checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
						      le32_to_cpu(ghdr->size),
						      checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(
			    discovery_bin + offset,
			    sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(
			    discovery_bin + offset,
			    le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(
			    discovery_bin + offset,
			    le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->discovery.bin);
	adev->discovery.bin = NULL;
	if ((amdgpu_discovery != 2) &&
	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
		amdgpu_ras_query_boot_status(adev, 4);
	return r;
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->discovery.bin);
	adev->discovery.bin = NULL;
}

static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
					uint8_t instance, uint16_t hw_id)
{
	if (instance >= HWIP_MAX_INSTANCE) {
		dev_err(adev->dev,
			"Unexpected instance_number (%d) from ip discovery blob\n",
			instance);
		return -EINVAL;
	}
	if (hw_id >= HW_ID_MAX) {
		dev_err(adev->dev,
			"Unexpected hw_id (%d) from ip discovery blob\n",
			hw_id);
		return -EINVAL;
	}

	return 0;
}

static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	uint16_t hw_id;
	uint8_t inst;
	int i, j;

	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header
			*)(discovery_bin +
			   le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(discovery_bin + ip_offset);
			inst = ip->number_instance;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
				goto next_ip;

			if (ip->harvest == 1) {
				switch (hw_id) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (inst == 0) {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
					} else {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
					}
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			ip_offset += struct_size(ip, base_address,
						 ip->num_base_address);
		}
	}
}
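
/* Newer discovery binaries carry a dedicated harvest table: each entry names
 * a hw_id plus instance number, and the list terminates at the first zero
 * hw_id.  This supersedes the per-IP harvest bit walk used above for older
 * binaries.
 */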
static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;
	uint32_t umc_harvest_config = 0;

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			adev->vcn.harvest_config |=
				(1 << harvest_info->list[i].number_instance);
			adev->jpeg.harvest_config |=
				(1 << harvest_info->list[i].number_instance);

			adev->vcn.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			adev->jpeg.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			umc_harvest_config |=
				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
			(*umc_harvest_count)++;
			break;
		case GC_HWID:
			adev->gfx.xcc_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case SDMA0_HWID:
			adev->sdma.sdma_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
#if defined(CONFIG_DRM_AMD_ISP)
		case ISP_HWID:
			adev->isp.harvest_config |=
				~(1U << harvest_info->list[i].number_instance);
			break;
#endif
		default:
			break;
		}
	}

	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
				~umc_harvest_config;
}

/* ================================================== */

struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8 num_instance;
	u8 major, minor, revision;
	u8 harvest;

	int num_base_addresses;
	u32 base_addr[] __counted_by(num_base_addresses);
};

struct ip_hw_id {
	struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t at;
	int ii;

	for (at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that, at + size <= PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		at += sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
	}

	return at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static const struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static const struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static const struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static const struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static const struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj; /* ip_discovery/ */
	struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	kfree(ip_top);
	adev->discovery.ip_top = NULL;
}

static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
{
	uint8_t harvest = 0;

	/* Until a uniform way is figured, get mask based on hwid */
	switch (hw_id) {
	case VCN_HWID:
		/* VCN vs UVD+VCE */
		if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
			harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
		break;
	case DMU_HWID:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
			harvest = 0x1;
		break;
	case UMC_HWID:
		/* TODO: It needs another parsing; for now, ignore.*/
		break;
	case GC_HWID:
		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
		break;
	case SDMA0_HWID:
		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
		break;
	default:
		break;
	}

	return harvest;
}
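
/* Populate the per-die portion of the sysfs tree:
 *   ip_discovery/die/#die/#hw_id/#instance/<attributes...>
 * For each known HW ID, every matching IP instance found in the die gets its
 * own kobject carrying the version, harvest and base-address attributes.
 */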
static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	int ii, jj, kk, res;
	uint16_t hw_id;
	uint8_t inst;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip_v4 *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip_v4 *)(discovery_bin + ip_offset);
			inst = ip->instance_number;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
			    hw_id != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->instance_number;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest =
				amdgpu_discovery_get_harvest_info(
					adev, ip_hw_instance->hw_id,
					ip_hw_instance->num_instance);
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
				if (reg_base_64)
					ip_hw_instance->base_addr[kk] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
				else
					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
			}

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			if (reg_base_64)
				ip_offset += struct_size(ip, base_address_64,
							 ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address,
							 ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct ip_discovery_top *ip_top = adev->discovery.ip_top;
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header
			*)(discovery_bin +
			   le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
	}

	return 0;
}

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct ip_discovery_top *ip_top;
	struct kset *die_kset;
	int res, ii;

	if (!discovery_bin)
		return -EINVAL;

	ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL);
	if (!ip_top)
		return -ENOMEM;

	ip_top->adev = adev;
	adev->discovery.ip_top = ip_top;
	res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)
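
/* Teardown walks each kset in reverse, dropping the kset's list spinlock
 * around kobject_put() since the final put can free the containing object
 * via its release callback.
 */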
static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct ip_discovery_top *ip_top = adev->discovery.ip_top;
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&ip_top->die_kset.kobj);
	kobject_put(&ip_top->kobj);
}

/* ================================================== */
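
/* Parse the IP discovery table proper: walk every die and every IP instance,
 * record register base offsets in adev->reg_offset[][], derive the versions
 * stored in adev->ip_versions[][], and accumulate the per-block instance
 * masks (VCN/JPEG, SDMA, GC/XCC, UMC) used later for harvesting.
 */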
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	uint8_t num_base_address, subrev, variant;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	uint8_t *discovery_bin;
	struct ip_v4 *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint32_t wafl_ver;
	uint16_t num_ips;
	uint16_t hw_id;
	uint8_t inst;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r)
		return r;
	discovery_bin = adev->discovery.bin;
	wafl_ver = 0;
	adev->gfx.xcc_mask = 0;
	adev->sdma.sdma_mask = 0;
	adev->vcn.inst_mask = 0;
	adev->jpeg.inst_mask = 0;
	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header
			*)(discovery_bin +
			   le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
				  le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
			  le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(discovery_bin + ip_offset);

			inst = ip->instance_number;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->instance_number,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				if (adev->vcn.num_vcn_inst <
				    AMDGPU_MAX_VCN_INSTANCES) {
					adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
						ip->revision & 0xc0;
					adev->vcn.num_vcn_inst++;
					adev->vcn.inst_mask |=
						(1U << ip->instance_number);
					adev->jpeg.inst_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
				}
				ip->revision &= ~0xc0;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances <
				    AMDGPU_MAX_SDMA_INSTANCES) {
					adev->sdma.num_instances++;
					adev->sdma.sdma_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
				}
			}

			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
					adev->vpe.num_instances++;
				else
					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
						adev->vpe.num_instances + 1,
						AMDGPU_MAX_VPE_INSTANCES);
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
				adev->gmc.num_umc++;
				adev->umc.node_inst_num++;
			}

			if (le16_to_cpu(ip->hw_id) == GC_HWID)
				adev->gfx.xcc_mask |=
					(1U << ip->instance_number);

			if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
				wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
							   ip->revision, 0, 0);

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				if (ihdr->base_addr_64_bit)
					/* Truncate the 64bit base address from ip discovery
					 * and only store lower 32bit ip base in reg_offset[].
					 * Bits > 32 follows ASIC specific format, thus just
					 * discard them and handle it within specific ASIC.
					 * By this way reg_offset[] and related helpers can
					 * stay unchanged.
					 * The base address is in dwords, thus clear the
					 * highest 2 bits to store.
					 */
					ip->base_address[k] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
				else
					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
				    hw_id_map[hw_ip] != 0) {
					DRM_DEBUG("set register base offset for %s\n",
						  hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->instance_number] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example. Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
					 * example. On most chips there are multiple instances
					 * with the same HWID.
					 */

					if (ihdr->version < 3) {
						subrev = 0;
						variant = 0;
					} else {
						subrev = ip->sub_revision;
						variant = ip->variant;
					}

					adev->ip_versions[hw_ip]
							 [ip->instance_number] =
						IP_VERSION_FULL(ip->major,
								ip->minor,
								ip->revision,
								variant,
								subrev);
				}
			}

next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
		adev->ip_versions[XGMI_HWIP][0] = wafl_ver;

	return 0;
}

static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct ip_discovery_header *ihdr;
	struct binary_header *bhdr;
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;
	uint16_t offset, ihdr_ver;

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
	ihdr = (struct ip_discovery_header *)(discovery_bin + offset);
	ihdr_ver = le16_to_cpu(ihdr->version);
	/*
	 * Harvest table does not fit Navi1x and legacy GPUs,
	 * so read harvest bit per IP data structure to set
	 * harvest configuration.
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
	    ihdr_ver <= 2) {
		if ((adev->pdev->device == 0x731E &&
		     (adev->pdev->revision == 0xC6 ||
		      adev->pdev->revision == 0xC7)) ||
		    (adev->pdev->device == 0x7340 &&
		     adev->pdev->revision == 0xC9) ||
		    (adev->pdev->device == 0x7360 &&
		     adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
								 &vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc) {
		adev->gmc.num_umc -= umc_harvest_count;
	}
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v1_3 v1_3;
	struct gc_info_v2_0 v2;
	struct gc_info_v2_1 v2_1;
};
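
/* The GC info table comes in two major versions: v1.x describes WGP-based
 * parts (each WGP counts as two CUs, hence the 2 * (wgp0 + wgp1) math below)
 * while v2.x reports CU counts directly.  Minor versions only append fields,
 * so they are parsed incrementally.
 */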
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
			adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
			adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
			adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
			adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union mall_info {
	struct mall_info_v1_0 v1;
	struct mall_info_v2_0 v2;
};
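
/* MALL (Memory Access at Last Level) sizing: v1 sums a per-UMC size over all
 * UMC instances, doubling it where the corresponding m_s_present bit is set
 * and halving it where the half-use bit is set; v2 simply multiplies the
 * per-UMC size by the UMC count.
 */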
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		adev->gmc.m_half_use = half_use;
		break;
	case 2:
		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union vcn_info {
	struct vcn_info_v1_0 v1;
};

static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union vcn_info *vcn_info;
	u16 offset;
	int v;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
	 * but that may change in the future with new GPUs so keep this
	 * check for defensive purposes.
	 */
	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
		dev_err(adev->dev, "invalid vcn instances\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);

	if (!offset)
		return 0;

	vcn_info = (union vcn_info *)(discovery_bin + offset);

	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
	case 1:
		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
		 * so this won't overflow.
		 */
		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
			adev->vcn.inst[v].vcn_codec_disable_mask =
				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled VCN info table %d.%d\n",
			le16_to_cpu(vcn_info->v1.header.version_major),
			le16_to_cpu(vcn_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union nps_info {
	struct nps_info_v1_0 v1;
};

static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
					     union nps_info *nps_data)
{
	uint64_t vram_size, pos, offset;
	struct nps_info_header *nhdr;
	struct binary_header bhdr;
	uint16_t checksum;

	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	pos = vram_size - DISCOVERY_TMR_OFFSET;
	amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);

	offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
	checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);

	amdgpu_device_vram_access(adev, (pos + offset), nps_data,
				  sizeof(*nps_data), false);

	nhdr = (struct nps_info_header *)(nps_data);
	if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
					      le32_to_cpu(nhdr->size_bytes),
					      checksum)) {
		dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
		return -EINVAL;
	}

	return 0;
}
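
/* NPS (NUMA partitioning) memory ranges.  With refresh set, the table is
 * re-read straight from the TMR in VRAM rather than from the cached binary,
 * so callers can pick up a table that changed after init (e.g. after an NPS
 * mode switch).
 */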
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
				  uint32_t *nps_type,
				  struct amdgpu_gmc_memrange **ranges,
				  int *range_cnt, bool refresh)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct amdgpu_gmc_memrange *mem_ranges;
	struct binary_header *bhdr;
	union nps_info *nps_info;
	union nps_info nps_data;
	u16 offset;
	int i, r;

	if (!nps_type || !range_cnt || !ranges)
		return -EINVAL;

	if (refresh) {
		r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
		if (r)
			return r;
		nps_info = &nps_data;
	} else {
		if (!discovery_bin) {
			dev_err(adev->dev,
				"fetch mem range failed, ip discovery uninitialized\n");
			return -EINVAL;
		}

		bhdr = (struct binary_header *)discovery_bin;
		offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);

		if (!offset)
			return -ENOENT;

		/* If verification fails, return as if NPS table doesn't exist */
		if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
			return -ENOENT;

		nps_info = (union nps_info *)(discovery_bin + offset);
	}

	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
	case 1:
		mem_ranges = kvcalloc(nps_info->v1.count,
				      sizeof(*mem_ranges),
				      GFP_KERNEL);
		if (!mem_ranges)
			return -ENOMEM;
		*nps_type = nps_info->v1.nps_type;
		*range_cnt = nps_info->v1.count;
		for (i = 0; i < *range_cnt; i++) {
			mem_ranges[i].base_address =
				nps_info->v1.instance_info[i].base_address;
			mem_ranges[i].limit_address =
				nps_info->v1.instance_info[i].limit_address;
			mem_ranges[i].nid_mask = -1;
			mem_ranges[i].flags = 0;
		}
		*ranges = mem_ranges;
		break;
	default:
		dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
			le16_to_cpu(nps_info->v1.header.version_major),
			le16_to_cpu(nps_info->v1.header.version_minor));
		return -EINVAL;
	}

	return 0;
}
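/*
 * Each amdgpu_discovery_set_*_ip_blocks() helper below maps a discovered IP
 * version onto the matching driver implementation and registers it with
 * amdgpu_device_ip_block_add(). Helpers for mandatory blocks return -EINVAL
 * on an unrecognized version so new hardware fails loudly rather than binding
 * a stale implementation; optional blocks (MES, VPE, UMSCH, ISP, RAS) just
 * skip versions they do not know.
 */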
static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
	/* what IP to use for this? */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
		break;
	case IP_VERSION(12, 1, 0):
		amdgpu_device_ip_block_add(adev, &soc_v1_0_common_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add common ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
	/* use GC or MMHUB IP version */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
	case IP_VERSION(12, 1, 0):
		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
		break;
	default:
		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 3, 0):
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		break;
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
		break;
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 1, 0):
		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}
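/* PSP is keyed off the MP0 version rather than GC, since one PSP generation
 * may be paired with several GFX generations.
 */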
static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(13, 0, 12):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	case IP_VERSION(13, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
	case IP_VERSION(14, 0, 5):
		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
		break;
	case IP_VERSION(15, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v15_0_ip_block);
		break;
	case IP_VERSION(15, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v15_0_8_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP0_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}
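/* SMU selection follows MP1. MP1 11.0.2 is shared between Arcturus and the
 * Vega20 generation, so the ASIC type decides between the SMU v11 code and
 * the legacy powerplay path.
 */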
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(13, 0, 12):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
	case IP_VERSION(14, 0, 4):
	case IP_VERSION(14, 0, 5):
		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
		break;
	case IP_VERSION(15, 0, 0):
		amdgpu_device_ip_block_add(adev, &smu_v15_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP1_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}
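/* Display: parts with DCN report their version under DCE_HWIP, while the
 * older DCE 12.x hardware shows up as DCI_HWIP; both routes register the
 * same DM block. Virtual display and SR-IOV configurations get the
 * vkms-based stand-in instead.
 */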
#if defined(CONFIG_DRM_AMD_DC)
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);
	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
#endif

static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
		case IP_VERSION(3, 5, 0):
		case IP_VERSION(3, 5, 1):
		case IP_VERSION(3, 6, 0):
		case IP_VERSION(4, 1, 0):
			/* TODO: Fix IP version. DC code expects version 4.0.1 */
			if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
				adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);

			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, DCE_HWIP, 0));
			return -EINVAL;
		}
	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, DCI_HWIP, 0));
			return -EINVAL;
		}
	}
#endif
	return 0;
}

static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
		break;
	case IP_VERSION(12, 1, 0):
		amdgpu_device_ip_block_add(adev, &gfx_v12_1_ip_block);
		break;
	default:
		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}
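/* All SDMA instances on a part share one version, so instance 0 picks the
 * engine implementation for the whole family.
 */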
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
	case IP_VERSION(4, 4, 4):
		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
	case IP_VERSION(6, 1, 2):
	case IP_VERSION(6, 1, 3):
	case IP_VERSION(6, 1, 4):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
		amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
		break;
	case IP_VERSION(7, 1, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v7_1_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
		return -EINVAL;
	}

	return 0;
}

static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 12):
	case IP_VERSION(13, 0, 14):
		amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block);
		break;
	default:
		break;
	}
	return 0;
}
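/* Multimedia: a populated VCE version implies the legacy UVD/VCE pair
 * (Vega era); otherwise the UVD slot carries the unified VCN version and
 * the matching JPEG block is registered alongside it.
 */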
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, UVD_HWIP, 0));
			return -EINVAL;
		}
		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, VCE_HWIP, 0));
			return -EINVAL;
		}
	} else {
		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 2):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		case IP_VERSION(4, 0, 3):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
			break;
		case IP_VERSION(4, 0, 5):
		case IP_VERSION(4, 0, 6):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
			break;
		case IP_VERSION(5, 0, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
			break;
		case IP_VERSION(5, 3, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v5_3_0_ip_block);
			break;
		case IP_VERSION(5, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, UVD_HWIP, 0));
			return -EINVAL;
		}
	}
	return 0;
}
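/* MES is optional: where it exists, enable_mes/enable_mes_kiq steer the
 * rest of the driver, and on GC 12.x the unified MES can additionally be
 * requested through the amdgpu_uni_mes module parameter.
 */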
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		if (amdgpu_uni_mes)
			adev->enable_uni_mes = true;
		break;
	case IP_VERSION(12, 1, 0):
		amdgpu_device_ip_block_add(adev, &mes_v12_1_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		if (amdgpu_uni_mes)
			adev->enable_uni_mes = true;
		break;
	default:
		break;
	}
	return 0;
}

static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		aqua_vanjaram_init_soc_config(adev);
		break;
	case IP_VERSION(12, 1, 0):
		soc_v1_0_init_soc_config(adev);
		break;
	default:
		break;
	}
}

static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
	case IP_VERSION(6, 1, 3):
		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
		break;
	default:
		break;
	}

	return 0;
}

static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(4, 0, 5):
	case IP_VERSION(4, 0, 6):
		if (amdgpu_umsch_mm & 0x1) {
			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
			adev->enable_umsch_mm = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
{
#if defined(CONFIG_DRM_AMD_ISP)
	switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
	case IP_VERSION(4, 1, 0):
		amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
		break;
	case IP_VERSION(4, 1, 1):
		amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
		break;
	default:
		break;
	}
#endif

	return 0;
}
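/*
 * amdgpu_discovery_set_ip_blocks - build the IP block list for this device
 *
 * ASICs that predate a reliable discovery table get their IP versions
 * hardcoded in the switch below; everything else derives them from the
 * discovery binary via amdgpu_discovery_reg_base_init(). Once the versions
 * are known, the per-IP helpers above are invoked in dependency order.
 */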
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->sdma.sdma_mask = 3;
		adev->gmc.num_umc = 4;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->sdma.sdma_mask = 3;
		adev->gmc.num_umc = 4;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->sdma.sdma_mask = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		adev->gfx.xcc_mask = 1;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->sdma.sdma_mask = 3;
		adev->gmc.num_umc = 8;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->sdma.sdma_mask = 0xff;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
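	/* Map the GC version back onto the coarse family id that older code
	 * paths still key off.
	 */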
	case CHIP_ALDEBARAN:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->sdma.sdma_mask = 0x1f;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	case CHIP_CYAN_SKILLFISH:
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
			r = amdgpu_discovery_reg_base_init(adev);
			if (r)
				return -EINVAL;

			amdgpu_discovery_harvest_ip(adev);
			amdgpu_discovery_get_gfx_info(adev);
			amdgpu_discovery_get_mall_info(adev);
			amdgpu_discovery_get_vcn_info(adev);
		} else {
			cyan_skillfish_reg_base_init(adev);
			adev->sdma.num_instances = 2;
			adev->sdma.sdma_mask = 3;
			adev->gfx.xcc_mask = 1;
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
			adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
		}
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			drm_err(&adev->ddev, "discovery failed: %d\n", r);
			return r;
		}

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

	amdgpu_discovery_init_soc_config(adev);
	amdgpu_discovery_sysfs_init(adev);
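	/* GC versions that only ship in integrated parts imply AMD_IS_APU. */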
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		adev->family = AMDGPU_FAMILY_GC_11_5_0;
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
	case IP_VERSION(12, 1, 0):
		adev->family = AMDGPU_FAMILY_GC_12_0_0;
		break;
	default:
		return -EINVAL;
	}
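	/* set HDP version; the flush registers themselves live behind NBIO,
	 * which is why hdp_flush_reg was picked together with nbio.funcs above.
	 */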
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

	/* set NBIO version */
	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 9, 0):
	case IP_VERSION(7, 9, 1):
		adev->nbio.funcs = &nbio_v7_9_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
		break;
	case IP_VERSION(7, 11, 0):
	case IP_VERSION(7, 11, 1):
	case IP_VERSION(7, 11, 2):
	case IP_VERSION(7, 11, 3):
		adev->nbio.funcs = &nbio_v7_11_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	case IP_VERSION(6, 3, 1):
	case IP_VERSION(7, 11, 4):
		adev->nbio.funcs = &nbif_v6_3_1_funcs;
		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
		break;
	default:
		break;
	}
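	/* set SMUIO version; on SMUIO 13.0.3 class parts the package-type
	 * fuse, not the GC version, decides whether this is an APU.
	 */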
	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	case IP_VERSION(7, 0, 0):
		adev->hdp.funcs = &hdp_v7_0_funcs;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	case IP_VERSION(4, 3, 0):
		adev->df.funcs = &df_v4_3_funcs;
		break;
	case IP_VERSION(4, 6, 2):
		adev->df.funcs = &df_v4_6_2_funcs;
		break;
	case IP_VERSION(4, 15, 0):
	case IP_VERSION(4, 15, 1):
		adev->df.funcs = &df_v4_15_funcs;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 11):
		adev->smuio.funcs = &smuio_v13_0_3_funcs;
		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
			adev->flags |= AMD_IS_APU;
		}
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	case IP_VERSION(14, 0, 2):
		adev->smuio.funcs = &smuio_v14_0_2_funcs;
		break;
	case IP_VERSION(15, 0, 0):
		adev->smuio.funcs = &smuio_v15_0_0_funcs;
		break;
	case IP_VERSION(15, 0, 8):
		adev->smuio.funcs = &smuio_v15_0_8_funcs;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
		adev->lsdma.funcs = &lsdma_v7_0_funcs;
		break;
	default:
		break;
	}

	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_ras_ip_blocks(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	     !amdgpu_sriov_vf(adev) &&
	     amdgpu_dpm == 1) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO &&
	     amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_isp_ip_blocks(adev);
	if (r)
		return r;
	return 0;
}