1 /* 2 * Copyright 2018 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 26 #include "amdgpu.h" 27 #include "amdgpu_discovery.h" 28 #include "soc15_hw_ip.h" 29 #include "discovery.h" 30 #include "amdgpu_ras.h" 31 32 #include "soc15.h" 33 #include "gfx_v9_0.h" 34 #include "gfx_v9_4_3.h" 35 #include "gmc_v9_0.h" 36 #include "df_v1_7.h" 37 #include "df_v3_6.h" 38 #include "df_v4_3.h" 39 #include "df_v4_6_2.h" 40 #include "nbio_v6_1.h" 41 #include "nbio_v7_0.h" 42 #include "nbio_v7_4.h" 43 #include "nbio_v7_9.h" 44 #include "nbio_v7_11.h" 45 #include "hdp_v4_0.h" 46 #include "vega10_ih.h" 47 #include "vega20_ih.h" 48 #include "sdma_v4_0.h" 49 #include "sdma_v4_4_2.h" 50 #include "uvd_v7_0.h" 51 #include "vce_v4_0.h" 52 #include "vcn_v1_0.h" 53 #include "vcn_v2_5.h" 54 #include "jpeg_v2_5.h" 55 #include "smuio_v9_0.h" 56 #include "gmc_v10_0.h" 57 #include "gmc_v11_0.h" 58 #include "gfxhub_v2_0.h" 59 #include "mmhub_v2_0.h" 60 #include "nbio_v2_3.h" 61 #include "nbio_v4_3.h" 62 #include "nbio_v7_2.h" 63 #include "nbio_v7_7.h" 64 #include "nbif_v6_3_1.h" 65 #include "hdp_v5_0.h" 66 #include "hdp_v5_2.h" 67 #include "hdp_v6_0.h" 68 #include "hdp_v7_0.h" 69 #include "nv.h" 70 #include "soc21.h" 71 #include "navi10_ih.h" 72 #include "ih_v6_0.h" 73 #include "ih_v6_1.h" 74 #include "ih_v7_0.h" 75 #include "gfx_v10_0.h" 76 #include "gfx_v11_0.h" 77 #include "sdma_v5_0.h" 78 #include "sdma_v5_2.h" 79 #include "sdma_v6_0.h" 80 #include "lsdma_v6_0.h" 81 #include "lsdma_v7_0.h" 82 #include "vcn_v2_0.h" 83 #include "jpeg_v2_0.h" 84 #include "vcn_v3_0.h" 85 #include "jpeg_v3_0.h" 86 #include "vcn_v4_0.h" 87 #include "jpeg_v4_0.h" 88 #include "vcn_v4_0_3.h" 89 #include "jpeg_v4_0_3.h" 90 #include "vcn_v4_0_5.h" 91 #include "jpeg_v4_0_5.h" 92 #include "amdgpu_vkms.h" 93 #include "mes_v10_1.h" 94 #include "mes_v11_0.h" 95 #include "smuio_v11_0.h" 96 #include "smuio_v11_0_6.h" 97 #include "smuio_v13_0.h" 98 #include "smuio_v13_0_3.h" 99 #include "smuio_v13_0_6.h" 100 #include "smuio_v14_0_2.h" 101 #include "vcn_v5_0_0.h" 102 #include "jpeg_v5_0_0.h" 103 104 #include "amdgpu_vpe.h" 105 106 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin" 107 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY); 108 109 #define mmIP_DISCOVERY_VERSION 0x16A00 110 #define mmRCC_CONFIG_MEMSIZE 0xde3 111 #define mmMP0_SMN_C2PMSG_33 0x16061 112 #define mmMM_INDEX 0x0 113 #define mmMM_INDEX_HI 0x6 114 #define mmMM_DATA 0x1 115 116 static const char *hw_id_names[HW_ID_MAX] = { 117 [MP1_HWID] = "MP1", 118 [MP2_HWID] = "MP2", 119 [THM_HWID] = "THM", 120 [SMUIO_HWID] = "SMUIO", 121 [FUSE_HWID] = "FUSE", 122 [CLKA_HWID] = "CLKA", 123 [PWR_HWID] = "PWR", 124 [GC_HWID] = "GC", 125 [UVD_HWID] = "UVD", 126 [AUDIO_AZ_HWID] = "AUDIO_AZ", 127 [ACP_HWID] = "ACP", 128 [DCI_HWID] = "DCI", 129 [DMU_HWID] = "DMU", 130 [DCO_HWID] = "DCO", 131 [DIO_HWID] = "DIO", 132 [XDMA_HWID] = "XDMA", 133 [DCEAZ_HWID] = "DCEAZ", 134 [DAZ_HWID] = "DAZ", 135 [SDPMUX_HWID] = "SDPMUX", 136 [NTB_HWID] = "NTB", 137 [IOHC_HWID] = "IOHC", 138 [L2IMU_HWID] = "L2IMU", 139 [VCE_HWID] = "VCE", 140 [MMHUB_HWID] = "MMHUB", 141 [ATHUB_HWID] = "ATHUB", 142 [DBGU_NBIO_HWID] = "DBGU_NBIO", 143 [DFX_HWID] = "DFX", 144 [DBGU0_HWID] = "DBGU0", 145 [DBGU1_HWID] = "DBGU1", 146 [OSSSYS_HWID] = "OSSSYS", 147 [HDP_HWID] = "HDP", 148 [SDMA0_HWID] = "SDMA0", 149 [SDMA1_HWID] = "SDMA1", 150 [SDMA2_HWID] = "SDMA2", 151 [SDMA3_HWID] = "SDMA3", 152 [LSDMA_HWID] = "LSDMA", 153 [ISP_HWID] = "ISP", 154 [DBGU_IO_HWID] = "DBGU_IO", 155 [DF_HWID] = "DF", 156 [CLKB_HWID] = "CLKB", 157 [FCH_HWID] = "FCH", 158 [DFX_DAP_HWID] = "DFX_DAP", 159 [L1IMU_PCIE_HWID] = "L1IMU_PCIE", 160 [L1IMU_NBIF_HWID] = "L1IMU_NBIF", 161 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR", 162 [L1IMU3_HWID] = "L1IMU3", 163 [L1IMU4_HWID] = "L1IMU4", 164 [L1IMU5_HWID] = "L1IMU5", 165 [L1IMU6_HWID] = "L1IMU6", 166 [L1IMU7_HWID] = "L1IMU7", 167 [L1IMU8_HWID] = "L1IMU8", 168 [L1IMU9_HWID] = "L1IMU9", 169 [L1IMU10_HWID] = "L1IMU10", 170 [L1IMU11_HWID] = "L1IMU11", 171 [L1IMU12_HWID] = "L1IMU12", 172 [L1IMU13_HWID] = "L1IMU13", 173 [L1IMU14_HWID] = "L1IMU14", 174 [L1IMU15_HWID] = "L1IMU15", 175 [WAFLC_HWID] = "WAFLC", 176 [FCH_USB_PD_HWID] = "FCH_USB_PD", 177 [PCIE_HWID] = "PCIE", 178 [PCS_HWID] = "PCS", 179 [DDCL_HWID] = "DDCL", 180 [SST_HWID] = "SST", 181 [IOAGR_HWID] = "IOAGR", 182 [NBIF_HWID] = "NBIF", 183 [IOAPIC_HWID] = "IOAPIC", 184 [SYSTEMHUB_HWID] = "SYSTEMHUB", 185 [NTBCCP_HWID] = "NTBCCP", 186 [UMC_HWID] = "UMC", 187 [SATA_HWID] = "SATA", 188 [USB_HWID] = "USB", 189 [CCXSEC_HWID] = "CCXSEC", 190 [XGMI_HWID] = "XGMI", 191 [XGBE_HWID] = "XGBE", 192 [MP0_HWID] = "MP0", 193 [VPE_HWID] = "VPE", 194 }; 195 196 static int hw_id_map[MAX_HWIP] = { 197 [GC_HWIP] = GC_HWID, 198 [HDP_HWIP] = HDP_HWID, 199 [SDMA0_HWIP] = SDMA0_HWID, 200 [SDMA1_HWIP] = SDMA1_HWID, 201 [SDMA2_HWIP] = SDMA2_HWID, 202 [SDMA3_HWIP] = SDMA3_HWID, 203 [LSDMA_HWIP] = LSDMA_HWID, 204 [MMHUB_HWIP] = MMHUB_HWID, 205 [ATHUB_HWIP] = ATHUB_HWID, 206 [NBIO_HWIP] = NBIF_HWID, 207 [MP0_HWIP] = MP0_HWID, 208 [MP1_HWIP] = MP1_HWID, 209 [UVD_HWIP] = UVD_HWID, 210 [VCE_HWIP] = VCE_HWID, 211 [DF_HWIP] = DF_HWID, 212 [DCE_HWIP] = DMU_HWID, 213 [OSSSYS_HWIP] = OSSSYS_HWID, 214 [SMUIO_HWIP] = SMUIO_HWID, 215 [PWR_HWIP] = PWR_HWID, 216 [NBIF_HWIP] = NBIF_HWID, 217 [THM_HWIP] = THM_HWID, 218 [CLK_HWIP] = CLKA_HWID, 219 [UMC_HWIP] = UMC_HWID, 220 [XGMI_HWIP] = XGMI_HWID, 221 [DCI_HWIP] = DCI_HWID, 222 [PCIE_HWIP] = PCIE_HWID, 223 [VPE_HWIP] = VPE_HWID, 224 }; 225 226 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary) 227 { 228 u64 tmr_offset, tmr_size, pos; 229 void *discv_regn; 230 int ret; 231 232 ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size); 233 if (ret) 234 return ret; 235 236 pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET; 237 238 /* This region is read-only and reserved from system use */ 239 discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC); 240 if (discv_regn) { 241 memcpy(binary, discv_regn, adev->mman.discovery_tmr_size); 242 memunmap(discv_regn); 243 return 0; 244 } 245 246 return -ENOENT; 247 } 248 249 #define IP_DISCOVERY_V2 2 250 #define IP_DISCOVERY_V4 4 251 252 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, 253 uint8_t *binary) 254 { 255 uint64_t vram_size; 256 u32 msg; 257 int i, ret = 0; 258 259 /* It can take up to a second for IFWI init to complete on some dGPUs, 260 * but generally it should be in the 60-100ms range. Normally this starts 261 * as soon as the device gets power so by the time the OS loads this has long 262 * completed. However, when a card is hotplugged via e.g., USB4, we need to 263 * wait for this to complete. Once the C2PMSG is updated, we can 264 * continue. 265 */ 266 267 for (i = 0; i < 1000; i++) { 268 msg = RREG32(mmMP0_SMN_C2PMSG_33); 269 if (msg & 0x80000000) 270 break; 271 usleep_range(1000, 1100); 272 } 273 274 vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; 275 276 if (vram_size) { 277 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; 278 amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, 279 adev->mman.discovery_tmr_size, false); 280 } else { 281 ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary); 282 } 283 284 return ret; 285 } 286 287 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary) 288 { 289 const struct firmware *fw; 290 const char *fw_name; 291 int r; 292 293 switch (amdgpu_discovery) { 294 case 2: 295 fw_name = FIRMWARE_IP_DISCOVERY; 296 break; 297 default: 298 dev_warn(adev->dev, "amdgpu_discovery is not set properly\n"); 299 return -EINVAL; 300 } 301 302 r = request_firmware(&fw, fw_name, adev->dev); 303 if (r) { 304 dev_err(adev->dev, "can't load firmware \"%s\"\n", 305 fw_name); 306 return r; 307 } 308 309 memcpy((u8 *)binary, (u8 *)fw->data, fw->size); 310 release_firmware(fw); 311 312 return 0; 313 } 314 315 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size) 316 { 317 uint16_t checksum = 0; 318 int i; 319 320 for (i = 0; i < size; i++) 321 checksum += data[i]; 322 323 return checksum; 324 } 325 326 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size, 327 uint16_t expected) 328 { 329 return !!(amdgpu_discovery_calculate_checksum(data, size) == expected); 330 } 331 332 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary) 333 { 334 struct binary_header *bhdr; 335 bhdr = (struct binary_header *)binary; 336 337 return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE); 338 } 339 340 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) 341 { 342 /* 343 * So far, apply this quirk only on those Navy Flounder boards which 344 * have a bad harvest table of VCN config. 345 */ 346 if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) && 347 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) { 348 switch (adev->pdev->revision) { 349 case 0xC1: 350 case 0xC2: 351 case 0xC3: 352 case 0xC5: 353 case 0xC7: 354 case 0xCF: 355 case 0xDF: 356 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 357 adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1; 358 break; 359 default: 360 break; 361 } 362 } 363 } 364 365 static int amdgpu_discovery_init(struct amdgpu_device *adev) 366 { 367 struct table_info *info; 368 struct binary_header *bhdr; 369 uint16_t offset; 370 uint16_t size; 371 uint16_t checksum; 372 int r; 373 374 adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE; 375 adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL); 376 if (!adev->mman.discovery_bin) 377 return -ENOMEM; 378 379 /* Read from file if it is the preferred option */ 380 if (amdgpu_discovery == 2) { 381 dev_info(adev->dev, "use ip discovery information from file"); 382 r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin); 383 384 if (r) { 385 dev_err(adev->dev, "failed to read ip discovery binary from file\n"); 386 r = -EINVAL; 387 goto out; 388 } 389 390 } else { 391 r = amdgpu_discovery_read_binary_from_mem( 392 adev, adev->mman.discovery_bin); 393 if (r) 394 goto out; 395 } 396 397 /* check the ip discovery binary signature */ 398 if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { 399 dev_err(adev->dev, 400 "get invalid ip discovery binary signature\n"); 401 r = -EINVAL; 402 goto out; 403 } 404 405 bhdr = (struct binary_header *)adev->mman.discovery_bin; 406 407 offset = offsetof(struct binary_header, binary_checksum) + 408 sizeof(bhdr->binary_checksum); 409 size = le16_to_cpu(bhdr->binary_size) - offset; 410 checksum = le16_to_cpu(bhdr->binary_checksum); 411 412 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 413 size, checksum)) { 414 dev_err(adev->dev, "invalid ip discovery binary checksum\n"); 415 r = -EINVAL; 416 goto out; 417 } 418 419 info = &bhdr->table_list[IP_DISCOVERY]; 420 offset = le16_to_cpu(info->offset); 421 checksum = le16_to_cpu(info->checksum); 422 423 if (offset) { 424 struct ip_discovery_header *ihdr = 425 (struct ip_discovery_header *)(adev->mman.discovery_bin + offset); 426 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { 427 dev_err(adev->dev, "invalid ip discovery data table signature\n"); 428 r = -EINVAL; 429 goto out; 430 } 431 432 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 433 le16_to_cpu(ihdr->size), checksum)) { 434 dev_err(adev->dev, "invalid ip discovery data table checksum\n"); 435 r = -EINVAL; 436 goto out; 437 } 438 } 439 440 info = &bhdr->table_list[GC]; 441 offset = le16_to_cpu(info->offset); 442 checksum = le16_to_cpu(info->checksum); 443 444 if (offset) { 445 struct gpu_info_header *ghdr = 446 (struct gpu_info_header *)(adev->mman.discovery_bin + offset); 447 448 if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) { 449 dev_err(adev->dev, "invalid ip discovery gc table id\n"); 450 r = -EINVAL; 451 goto out; 452 } 453 454 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 455 le32_to_cpu(ghdr->size), checksum)) { 456 dev_err(adev->dev, "invalid gc data table checksum\n"); 457 r = -EINVAL; 458 goto out; 459 } 460 } 461 462 info = &bhdr->table_list[HARVEST_INFO]; 463 offset = le16_to_cpu(info->offset); 464 checksum = le16_to_cpu(info->checksum); 465 466 if (offset) { 467 struct harvest_info_header *hhdr = 468 (struct harvest_info_header *)(adev->mman.discovery_bin + offset); 469 470 if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) { 471 dev_err(adev->dev, "invalid ip discovery harvest table signature\n"); 472 r = -EINVAL; 473 goto out; 474 } 475 476 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 477 sizeof(struct harvest_table), checksum)) { 478 dev_err(adev->dev, "invalid harvest data table checksum\n"); 479 r = -EINVAL; 480 goto out; 481 } 482 } 483 484 info = &bhdr->table_list[VCN_INFO]; 485 offset = le16_to_cpu(info->offset); 486 checksum = le16_to_cpu(info->checksum); 487 488 if (offset) { 489 struct vcn_info_header *vhdr = 490 (struct vcn_info_header *)(adev->mman.discovery_bin + offset); 491 492 if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) { 493 dev_err(adev->dev, "invalid ip discovery vcn table id\n"); 494 r = -EINVAL; 495 goto out; 496 } 497 498 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 499 le32_to_cpu(vhdr->size_bytes), checksum)) { 500 dev_err(adev->dev, "invalid vcn data table checksum\n"); 501 r = -EINVAL; 502 goto out; 503 } 504 } 505 506 info = &bhdr->table_list[MALL_INFO]; 507 offset = le16_to_cpu(info->offset); 508 checksum = le16_to_cpu(info->checksum); 509 510 if (0 && offset) { 511 struct mall_info_header *mhdr = 512 (struct mall_info_header *)(adev->mman.discovery_bin + offset); 513 514 if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) { 515 dev_err(adev->dev, "invalid ip discovery mall table id\n"); 516 r = -EINVAL; 517 goto out; 518 } 519 520 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 521 le32_to_cpu(mhdr->size_bytes), checksum)) { 522 dev_err(adev->dev, "invalid mall data table checksum\n"); 523 r = -EINVAL; 524 goto out; 525 } 526 } 527 528 return 0; 529 530 out: 531 kfree(adev->mman.discovery_bin); 532 adev->mman.discovery_bin = NULL; 533 if ((amdgpu_discovery != 2) && 534 (RREG32(mmIP_DISCOVERY_VERSION) == 4)) 535 amdgpu_ras_query_boot_status(adev, 4); 536 return r; 537 } 538 539 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev); 540 541 void amdgpu_discovery_fini(struct amdgpu_device *adev) 542 { 543 amdgpu_discovery_sysfs_fini(adev); 544 kfree(adev->mman.discovery_bin); 545 adev->mman.discovery_bin = NULL; 546 } 547 548 static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip) 549 { 550 if (ip->instance_number >= HWIP_MAX_INSTANCE) { 551 DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n", 552 ip->instance_number); 553 return -EINVAL; 554 } 555 if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) { 556 DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n", 557 le16_to_cpu(ip->hw_id)); 558 return -EINVAL; 559 } 560 561 return 0; 562 } 563 564 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, 565 uint32_t *vcn_harvest_count) 566 { 567 struct binary_header *bhdr; 568 struct ip_discovery_header *ihdr; 569 struct die_header *dhdr; 570 struct ip_v4 *ip; 571 uint16_t die_offset, ip_offset, num_dies, num_ips; 572 int i, j; 573 574 bhdr = (struct binary_header *)adev->mman.discovery_bin; 575 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 576 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 577 num_dies = le16_to_cpu(ihdr->num_dies); 578 579 /* scan harvest bit of all IP data structures */ 580 for (i = 0; i < num_dies; i++) { 581 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 582 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 583 num_ips = le16_to_cpu(dhdr->num_ips); 584 ip_offset = die_offset + sizeof(*dhdr); 585 586 for (j = 0; j < num_ips; j++) { 587 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset); 588 589 if (amdgpu_discovery_validate_ip(ip)) 590 goto next_ip; 591 592 if (le16_to_cpu(ip->variant) == 1) { 593 switch (le16_to_cpu(ip->hw_id)) { 594 case VCN_HWID: 595 (*vcn_harvest_count)++; 596 if (ip->instance_number == 0) { 597 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; 598 adev->vcn.inst_mask &= 599 ~AMDGPU_VCN_HARVEST_VCN0; 600 adev->jpeg.inst_mask &= 601 ~AMDGPU_VCN_HARVEST_VCN0; 602 } else { 603 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 604 adev->vcn.inst_mask &= 605 ~AMDGPU_VCN_HARVEST_VCN1; 606 adev->jpeg.inst_mask &= 607 ~AMDGPU_VCN_HARVEST_VCN1; 608 } 609 break; 610 case DMU_HWID: 611 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 612 break; 613 default: 614 break; 615 } 616 } 617 next_ip: 618 if (ihdr->base_addr_64_bit) 619 ip_offset += struct_size(ip, base_address_64, ip->num_base_address); 620 else 621 ip_offset += struct_size(ip, base_address, ip->num_base_address); 622 } 623 } 624 } 625 626 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, 627 uint32_t *vcn_harvest_count, 628 uint32_t *umc_harvest_count) 629 { 630 struct binary_header *bhdr; 631 struct harvest_table *harvest_info; 632 u16 offset; 633 int i; 634 uint32_t umc_harvest_config = 0; 635 636 bhdr = (struct binary_header *)adev->mman.discovery_bin; 637 offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset); 638 639 if (!offset) { 640 dev_err(adev->dev, "invalid harvest table offset\n"); 641 return; 642 } 643 644 harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset); 645 646 for (i = 0; i < 32; i++) { 647 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) 648 break; 649 650 switch (le16_to_cpu(harvest_info->list[i].hw_id)) { 651 case VCN_HWID: 652 (*vcn_harvest_count)++; 653 adev->vcn.harvest_config |= 654 (1 << harvest_info->list[i].number_instance); 655 adev->jpeg.harvest_config |= 656 (1 << harvest_info->list[i].number_instance); 657 658 adev->vcn.inst_mask &= 659 ~(1U << harvest_info->list[i].number_instance); 660 adev->jpeg.inst_mask &= 661 ~(1U << harvest_info->list[i].number_instance); 662 break; 663 case DMU_HWID: 664 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 665 break; 666 case UMC_HWID: 667 umc_harvest_config |= 668 1 << (le16_to_cpu(harvest_info->list[i].number_instance)); 669 (*umc_harvest_count)++; 670 break; 671 case GC_HWID: 672 adev->gfx.xcc_mask &= 673 ~(1U << harvest_info->list[i].number_instance); 674 break; 675 case SDMA0_HWID: 676 adev->sdma.sdma_mask &= 677 ~(1U << harvest_info->list[i].number_instance); 678 break; 679 default: 680 break; 681 } 682 } 683 684 adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) & 685 ~umc_harvest_config; 686 } 687 688 /* ================================================== */ 689 690 struct ip_hw_instance { 691 struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */ 692 693 int hw_id; 694 u8 num_instance; 695 u8 major, minor, revision; 696 u8 harvest; 697 698 int num_base_addresses; 699 u32 base_addr[] __counted_by(num_base_addresses); 700 }; 701 702 struct ip_hw_id { 703 struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */ 704 int hw_id; 705 }; 706 707 struct ip_die_entry { 708 struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */ 709 u16 num_ips; 710 }; 711 712 /* -------------------------------------------------- */ 713 714 struct ip_hw_instance_attr { 715 struct attribute attr; 716 ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf); 717 }; 718 719 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf) 720 { 721 return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id); 722 } 723 724 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf) 725 { 726 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance); 727 } 728 729 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf) 730 { 731 return sysfs_emit(buf, "%d\n", ip_hw_instance->major); 732 } 733 734 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf) 735 { 736 return sysfs_emit(buf, "%d\n", ip_hw_instance->minor); 737 } 738 739 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf) 740 { 741 return sysfs_emit(buf, "%d\n", ip_hw_instance->revision); 742 } 743 744 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf) 745 { 746 return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest); 747 } 748 749 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf) 750 { 751 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses); 752 } 753 754 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf) 755 { 756 ssize_t res, at; 757 int ii; 758 759 for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) { 760 /* Here we satisfy the condition that, at + size <= PAGE_SIZE. 761 */ 762 if (at + 12 > PAGE_SIZE) 763 break; 764 res = sysfs_emit_at(buf, at, "0x%08X\n", 765 ip_hw_instance->base_addr[ii]); 766 if (res <= 0) 767 break; 768 at += res; 769 } 770 771 return res < 0 ? res : at; 772 } 773 774 static struct ip_hw_instance_attr ip_hw_attr[] = { 775 __ATTR_RO(hw_id), 776 __ATTR_RO(num_instance), 777 __ATTR_RO(major), 778 __ATTR_RO(minor), 779 __ATTR_RO(revision), 780 __ATTR_RO(harvest), 781 __ATTR_RO(num_base_addresses), 782 __ATTR_RO(base_addr), 783 }; 784 785 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1]; 786 ATTRIBUTE_GROUPS(ip_hw_instance); 787 788 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj) 789 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr) 790 791 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj, 792 struct attribute *attr, 793 char *buf) 794 { 795 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj); 796 struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr); 797 798 if (!ip_hw_attr->show) 799 return -EIO; 800 801 return ip_hw_attr->show(ip_hw_instance, buf); 802 } 803 804 static const struct sysfs_ops ip_hw_instance_sysfs_ops = { 805 .show = ip_hw_instance_attr_show, 806 }; 807 808 static void ip_hw_instance_release(struct kobject *kobj) 809 { 810 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj); 811 812 kfree(ip_hw_instance); 813 } 814 815 static const struct kobj_type ip_hw_instance_ktype = { 816 .release = ip_hw_instance_release, 817 .sysfs_ops = &ip_hw_instance_sysfs_ops, 818 .default_groups = ip_hw_instance_groups, 819 }; 820 821 /* -------------------------------------------------- */ 822 823 #define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset) 824 825 static void ip_hw_id_release(struct kobject *kobj) 826 { 827 struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj); 828 829 if (!list_empty(&ip_hw_id->hw_id_kset.list)) 830 DRM_ERROR("ip_hw_id->hw_id_kset is not empty"); 831 kfree(ip_hw_id); 832 } 833 834 static const struct kobj_type ip_hw_id_ktype = { 835 .release = ip_hw_id_release, 836 .sysfs_ops = &kobj_sysfs_ops, 837 }; 838 839 /* -------------------------------------------------- */ 840 841 static void die_kobj_release(struct kobject *kobj); 842 static void ip_disc_release(struct kobject *kobj); 843 844 struct ip_die_entry_attribute { 845 struct attribute attr; 846 ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf); 847 }; 848 849 #define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr) 850 851 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf) 852 { 853 return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips); 854 } 855 856 /* If there are more ip_die_entry attrs, other than the number of IPs, 857 * we can make this intro an array of attrs, and then initialize 858 * ip_die_entry_attrs in a loop. 859 */ 860 static struct ip_die_entry_attribute num_ips_attr = 861 __ATTR_RO(num_ips); 862 863 static struct attribute *ip_die_entry_attrs[] = { 864 &num_ips_attr.attr, 865 NULL, 866 }; 867 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */ 868 869 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset) 870 871 static ssize_t ip_die_entry_attr_show(struct kobject *kobj, 872 struct attribute *attr, 873 char *buf) 874 { 875 struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr); 876 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj); 877 878 if (!ip_die_entry_attr->show) 879 return -EIO; 880 881 return ip_die_entry_attr->show(ip_die_entry, buf); 882 } 883 884 static void ip_die_entry_release(struct kobject *kobj) 885 { 886 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj); 887 888 if (!list_empty(&ip_die_entry->ip_kset.list)) 889 DRM_ERROR("ip_die_entry->ip_kset is not empty"); 890 kfree(ip_die_entry); 891 } 892 893 static const struct sysfs_ops ip_die_entry_sysfs_ops = { 894 .show = ip_die_entry_attr_show, 895 }; 896 897 static const struct kobj_type ip_die_entry_ktype = { 898 .release = ip_die_entry_release, 899 .sysfs_ops = &ip_die_entry_sysfs_ops, 900 .default_groups = ip_die_entry_groups, 901 }; 902 903 static const struct kobj_type die_kobj_ktype = { 904 .release = die_kobj_release, 905 .sysfs_ops = &kobj_sysfs_ops, 906 }; 907 908 static const struct kobj_type ip_discovery_ktype = { 909 .release = ip_disc_release, 910 .sysfs_ops = &kobj_sysfs_ops, 911 }; 912 913 struct ip_discovery_top { 914 struct kobject kobj; /* ip_discovery/ */ 915 struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */ 916 struct amdgpu_device *adev; 917 }; 918 919 static void die_kobj_release(struct kobject *kobj) 920 { 921 struct ip_discovery_top *ip_top = container_of(to_kset(kobj), 922 struct ip_discovery_top, 923 die_kset); 924 if (!list_empty(&ip_top->die_kset.list)) 925 DRM_ERROR("ip_top->die_kset is not empty"); 926 } 927 928 static void ip_disc_release(struct kobject *kobj) 929 { 930 struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top, 931 kobj); 932 struct amdgpu_device *adev = ip_top->adev; 933 934 adev->ip_top = NULL; 935 kfree(ip_top); 936 } 937 938 static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev, 939 uint16_t hw_id, uint8_t inst) 940 { 941 uint8_t harvest = 0; 942 943 /* Until a uniform way is figured, get mask based on hwid */ 944 switch (hw_id) { 945 case VCN_HWID: 946 harvest = ((1 << inst) & adev->vcn.inst_mask) == 0; 947 break; 948 case DMU_HWID: 949 if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK) 950 harvest = 0x1; 951 break; 952 case UMC_HWID: 953 /* TODO: It needs another parsing; for now, ignore.*/ 954 break; 955 case GC_HWID: 956 harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0; 957 break; 958 case SDMA0_HWID: 959 harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0; 960 break; 961 default: 962 break; 963 } 964 965 return harvest; 966 } 967 968 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, 969 struct ip_die_entry *ip_die_entry, 970 const size_t _ip_offset, const int num_ips, 971 bool reg_base_64) 972 { 973 int ii, jj, kk, res; 974 975 DRM_DEBUG("num_ips:%d", num_ips); 976 977 /* Find all IPs of a given HW ID, and add their instance to 978 * #die/#hw_id/#instance/<attributes> 979 */ 980 for (ii = 0; ii < HW_ID_MAX; ii++) { 981 struct ip_hw_id *ip_hw_id = NULL; 982 size_t ip_offset = _ip_offset; 983 984 for (jj = 0; jj < num_ips; jj++) { 985 struct ip_v4 *ip; 986 struct ip_hw_instance *ip_hw_instance; 987 988 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset); 989 if (amdgpu_discovery_validate_ip(ip) || 990 le16_to_cpu(ip->hw_id) != ii) 991 goto next_ip; 992 993 DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset); 994 995 /* We have a hw_id match; register the hw 996 * block if not yet registered. 997 */ 998 if (!ip_hw_id) { 999 ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL); 1000 if (!ip_hw_id) 1001 return -ENOMEM; 1002 ip_hw_id->hw_id = ii; 1003 1004 kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii); 1005 ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset; 1006 ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype; 1007 res = kset_register(&ip_hw_id->hw_id_kset); 1008 if (res) { 1009 DRM_ERROR("Couldn't register ip_hw_id kset"); 1010 kfree(ip_hw_id); 1011 return res; 1012 } 1013 if (hw_id_names[ii]) { 1014 res = sysfs_create_link(&ip_die_entry->ip_kset.kobj, 1015 &ip_hw_id->hw_id_kset.kobj, 1016 hw_id_names[ii]); 1017 if (res) { 1018 DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n", 1019 hw_id_names[ii], 1020 kobject_name(&ip_die_entry->ip_kset.kobj)); 1021 } 1022 } 1023 } 1024 1025 /* Now register its instance. 1026 */ 1027 ip_hw_instance = kzalloc(struct_size(ip_hw_instance, 1028 base_addr, 1029 ip->num_base_address), 1030 GFP_KERNEL); 1031 if (!ip_hw_instance) { 1032 DRM_ERROR("no memory for ip_hw_instance"); 1033 return -ENOMEM; 1034 } 1035 ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */ 1036 ip_hw_instance->num_instance = ip->instance_number; 1037 ip_hw_instance->major = ip->major; 1038 ip_hw_instance->minor = ip->minor; 1039 ip_hw_instance->revision = ip->revision; 1040 ip_hw_instance->harvest = 1041 amdgpu_discovery_get_harvest_info( 1042 adev, ip_hw_instance->hw_id, 1043 ip_hw_instance->num_instance); 1044 ip_hw_instance->num_base_addresses = ip->num_base_address; 1045 1046 for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) { 1047 if (reg_base_64) 1048 ip_hw_instance->base_addr[kk] = 1049 lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF; 1050 else 1051 ip_hw_instance->base_addr[kk] = ip->base_address[kk]; 1052 } 1053 1054 kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype); 1055 ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset; 1056 res = kobject_add(&ip_hw_instance->kobj, NULL, 1057 "%d", ip_hw_instance->num_instance); 1058 next_ip: 1059 if (reg_base_64) 1060 ip_offset += struct_size(ip, base_address_64, 1061 ip->num_base_address); 1062 else 1063 ip_offset += struct_size(ip, base_address, 1064 ip->num_base_address); 1065 } 1066 } 1067 1068 return 0; 1069 } 1070 1071 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) 1072 { 1073 struct binary_header *bhdr; 1074 struct ip_discovery_header *ihdr; 1075 struct die_header *dhdr; 1076 struct kset *die_kset = &adev->ip_top->die_kset; 1077 u16 num_dies, die_offset, num_ips; 1078 size_t ip_offset; 1079 int ii, res; 1080 1081 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1082 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 1083 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 1084 num_dies = le16_to_cpu(ihdr->num_dies); 1085 1086 DRM_DEBUG("number of dies: %d\n", num_dies); 1087 1088 for (ii = 0; ii < num_dies; ii++) { 1089 struct ip_die_entry *ip_die_entry; 1090 1091 die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset); 1092 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 1093 num_ips = le16_to_cpu(dhdr->num_ips); 1094 ip_offset = die_offset + sizeof(*dhdr); 1095 1096 /* Add the die to the kset. 1097 * 1098 * dhdr->die_id == ii, which was checked in 1099 * amdgpu_discovery_reg_base_init(). 1100 */ 1101 1102 ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL); 1103 if (!ip_die_entry) 1104 return -ENOMEM; 1105 1106 ip_die_entry->num_ips = num_ips; 1107 1108 kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id)); 1109 ip_die_entry->ip_kset.kobj.kset = die_kset; 1110 ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype; 1111 res = kset_register(&ip_die_entry->ip_kset); 1112 if (res) { 1113 DRM_ERROR("Couldn't register ip_die_entry kset"); 1114 kfree(ip_die_entry); 1115 return res; 1116 } 1117 1118 amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit); 1119 } 1120 1121 return 0; 1122 } 1123 1124 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev) 1125 { 1126 struct kset *die_kset; 1127 int res, ii; 1128 1129 if (!adev->mman.discovery_bin) 1130 return -EINVAL; 1131 1132 adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL); 1133 if (!adev->ip_top) 1134 return -ENOMEM; 1135 1136 adev->ip_top->adev = adev; 1137 1138 res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype, 1139 &adev->dev->kobj, "ip_discovery"); 1140 if (res) { 1141 DRM_ERROR("Couldn't init and add ip_discovery/"); 1142 goto Err; 1143 } 1144 1145 die_kset = &adev->ip_top->die_kset; 1146 kobject_set_name(&die_kset->kobj, "%s", "die"); 1147 die_kset->kobj.parent = &adev->ip_top->kobj; 1148 die_kset->kobj.ktype = &die_kobj_ktype; 1149 res = kset_register(&adev->ip_top->die_kset); 1150 if (res) { 1151 DRM_ERROR("Couldn't register die_kset"); 1152 goto Err; 1153 } 1154 1155 for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++) 1156 ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr; 1157 ip_hw_instance_attrs[ii] = NULL; 1158 1159 res = amdgpu_discovery_sysfs_recurse(adev); 1160 1161 return res; 1162 Err: 1163 kobject_put(&adev->ip_top->kobj); 1164 return res; 1165 } 1166 1167 /* -------------------------------------------------- */ 1168 1169 #define list_to_kobj(el) container_of(el, struct kobject, entry) 1170 1171 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id) 1172 { 1173 struct list_head *el, *tmp; 1174 struct kset *hw_id_kset; 1175 1176 hw_id_kset = &ip_hw_id->hw_id_kset; 1177 spin_lock(&hw_id_kset->list_lock); 1178 list_for_each_prev_safe(el, tmp, &hw_id_kset->list) { 1179 list_del_init(el); 1180 spin_unlock(&hw_id_kset->list_lock); 1181 /* kobject is embedded in ip_hw_instance */ 1182 kobject_put(list_to_kobj(el)); 1183 spin_lock(&hw_id_kset->list_lock); 1184 } 1185 spin_unlock(&hw_id_kset->list_lock); 1186 kobject_put(&ip_hw_id->hw_id_kset.kobj); 1187 } 1188 1189 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry) 1190 { 1191 struct list_head *el, *tmp; 1192 struct kset *ip_kset; 1193 1194 ip_kset = &ip_die_entry->ip_kset; 1195 spin_lock(&ip_kset->list_lock); 1196 list_for_each_prev_safe(el, tmp, &ip_kset->list) { 1197 list_del_init(el); 1198 spin_unlock(&ip_kset->list_lock); 1199 amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el))); 1200 spin_lock(&ip_kset->list_lock); 1201 } 1202 spin_unlock(&ip_kset->list_lock); 1203 kobject_put(&ip_die_entry->ip_kset.kobj); 1204 } 1205 1206 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev) 1207 { 1208 struct list_head *el, *tmp; 1209 struct kset *die_kset; 1210 1211 die_kset = &adev->ip_top->die_kset; 1212 spin_lock(&die_kset->list_lock); 1213 list_for_each_prev_safe(el, tmp, &die_kset->list) { 1214 list_del_init(el); 1215 spin_unlock(&die_kset->list_lock); 1216 amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el))); 1217 spin_lock(&die_kset->list_lock); 1218 } 1219 spin_unlock(&die_kset->list_lock); 1220 kobject_put(&adev->ip_top->die_kset.kobj); 1221 kobject_put(&adev->ip_top->kobj); 1222 } 1223 1224 /* ================================================== */ 1225 1226 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) 1227 { 1228 uint8_t num_base_address, subrev, variant; 1229 struct binary_header *bhdr; 1230 struct ip_discovery_header *ihdr; 1231 struct die_header *dhdr; 1232 struct ip_v4 *ip; 1233 uint16_t die_offset; 1234 uint16_t ip_offset; 1235 uint16_t num_dies; 1236 uint16_t num_ips; 1237 int hw_ip; 1238 int i, j, k; 1239 int r; 1240 1241 r = amdgpu_discovery_init(adev); 1242 if (r) { 1243 DRM_ERROR("amdgpu_discovery_init failed\n"); 1244 return r; 1245 } 1246 1247 adev->gfx.xcc_mask = 0; 1248 adev->sdma.sdma_mask = 0; 1249 adev->vcn.inst_mask = 0; 1250 adev->jpeg.inst_mask = 0; 1251 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1252 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 1253 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 1254 num_dies = le16_to_cpu(ihdr->num_dies); 1255 1256 DRM_DEBUG("number of dies: %d\n", num_dies); 1257 1258 for (i = 0; i < num_dies; i++) { 1259 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 1260 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 1261 num_ips = le16_to_cpu(dhdr->num_ips); 1262 ip_offset = die_offset + sizeof(*dhdr); 1263 1264 if (le16_to_cpu(dhdr->die_id) != i) { 1265 DRM_ERROR("invalid die id %d, expected %d\n", 1266 le16_to_cpu(dhdr->die_id), i); 1267 return -EINVAL; 1268 } 1269 1270 DRM_DEBUG("number of hardware IPs on die%d: %d\n", 1271 le16_to_cpu(dhdr->die_id), num_ips); 1272 1273 for (j = 0; j < num_ips; j++) { 1274 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset); 1275 1276 if (amdgpu_discovery_validate_ip(ip)) 1277 goto next_ip; 1278 1279 num_base_address = ip->num_base_address; 1280 1281 DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n", 1282 hw_id_names[le16_to_cpu(ip->hw_id)], 1283 le16_to_cpu(ip->hw_id), 1284 ip->instance_number, 1285 ip->major, ip->minor, 1286 ip->revision); 1287 1288 if (le16_to_cpu(ip->hw_id) == VCN_HWID) { 1289 /* Bit [5:0]: original revision value 1290 * Bit [7:6]: en/decode capability: 1291 * 0b00 : VCN function normally 1292 * 0b10 : encode is disabled 1293 * 0b01 : decode is disabled 1294 */ 1295 if (adev->vcn.num_vcn_inst < 1296 AMDGPU_MAX_VCN_INSTANCES) { 1297 adev->vcn.vcn_config[adev->vcn.num_vcn_inst] = 1298 ip->revision & 0xc0; 1299 adev->vcn.num_vcn_inst++; 1300 adev->vcn.inst_mask |= 1301 (1U << ip->instance_number); 1302 adev->jpeg.inst_mask |= 1303 (1U << ip->instance_number); 1304 } else { 1305 dev_err(adev->dev, "Too many VCN instances: %d vs %d\n", 1306 adev->vcn.num_vcn_inst + 1, 1307 AMDGPU_MAX_VCN_INSTANCES); 1308 } 1309 ip->revision &= ~0xc0; 1310 } 1311 if (le16_to_cpu(ip->hw_id) == SDMA0_HWID || 1312 le16_to_cpu(ip->hw_id) == SDMA1_HWID || 1313 le16_to_cpu(ip->hw_id) == SDMA2_HWID || 1314 le16_to_cpu(ip->hw_id) == SDMA3_HWID) { 1315 if (adev->sdma.num_instances < 1316 AMDGPU_MAX_SDMA_INSTANCES) { 1317 adev->sdma.num_instances++; 1318 adev->sdma.sdma_mask |= 1319 (1U << ip->instance_number); 1320 } else { 1321 dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n", 1322 adev->sdma.num_instances + 1, 1323 AMDGPU_MAX_SDMA_INSTANCES); 1324 } 1325 } 1326 1327 if (le16_to_cpu(ip->hw_id) == VPE_HWID) { 1328 if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES) 1329 adev->vpe.num_instances++; 1330 else 1331 dev_err(adev->dev, "Too many VPE instances: %d vs %d\n", 1332 adev->vpe.num_instances + 1, 1333 AMDGPU_MAX_VPE_INSTANCES); 1334 } 1335 1336 if (le16_to_cpu(ip->hw_id) == UMC_HWID) { 1337 adev->gmc.num_umc++; 1338 adev->umc.node_inst_num++; 1339 } 1340 1341 if (le16_to_cpu(ip->hw_id) == GC_HWID) 1342 adev->gfx.xcc_mask |= 1343 (1U << ip->instance_number); 1344 1345 for (k = 0; k < num_base_address; k++) { 1346 /* 1347 * convert the endianness of base addresses in place, 1348 * so that we don't need to convert them when accessing adev->reg_offset. 1349 */ 1350 if (ihdr->base_addr_64_bit) 1351 /* Truncate the 64bit base address from ip discovery 1352 * and only store lower 32bit ip base in reg_offset[]. 1353 * Bits > 32 follows ASIC specific format, thus just 1354 * discard them and handle it within specific ASIC. 1355 * By this way reg_offset[] and related helpers can 1356 * stay unchanged. 1357 * The base address is in dwords, thus clear the 1358 * highest 2 bits to store. 1359 */ 1360 ip->base_address[k] = 1361 lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF; 1362 else 1363 ip->base_address[k] = le32_to_cpu(ip->base_address[k]); 1364 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]); 1365 } 1366 1367 for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) { 1368 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) && 1369 hw_id_map[hw_ip] != 0) { 1370 DRM_DEBUG("set register base offset for %s\n", 1371 hw_id_names[le16_to_cpu(ip->hw_id)]); 1372 adev->reg_offset[hw_ip][ip->instance_number] = 1373 ip->base_address; 1374 /* Instance support is somewhat inconsistent. 1375 * SDMA is a good example. Sienna cichlid has 4 total 1376 * SDMA instances, each enumerated separately (HWIDs 1377 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances, 1378 * but they are enumerated as multiple instances of the 1379 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another 1380 * example. On most chips there are multiple instances 1381 * with the same HWID. 1382 */ 1383 1384 if (ihdr->version < 3) { 1385 subrev = 0; 1386 variant = 0; 1387 } else { 1388 subrev = ip->sub_revision; 1389 variant = ip->variant; 1390 } 1391 1392 adev->ip_versions[hw_ip] 1393 [ip->instance_number] = 1394 IP_VERSION_FULL(ip->major, 1395 ip->minor, 1396 ip->revision, 1397 variant, 1398 subrev); 1399 } 1400 } 1401 1402 next_ip: 1403 if (ihdr->base_addr_64_bit) 1404 ip_offset += struct_size(ip, base_address_64, ip->num_base_address); 1405 else 1406 ip_offset += struct_size(ip, base_address, ip->num_base_address); 1407 } 1408 } 1409 1410 return 0; 1411 } 1412 1413 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) 1414 { 1415 int vcn_harvest_count = 0; 1416 int umc_harvest_count = 0; 1417 1418 /* 1419 * Harvest table does not fit Navi1x and legacy GPUs, 1420 * so read harvest bit per IP data structure to set 1421 * harvest configuration. 1422 */ 1423 if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) && 1424 amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) { 1425 if ((adev->pdev->device == 0x731E && 1426 (adev->pdev->revision == 0xC6 || 1427 adev->pdev->revision == 0xC7)) || 1428 (adev->pdev->device == 0x7340 && 1429 adev->pdev->revision == 0xC9) || 1430 (adev->pdev->device == 0x7360 && 1431 adev->pdev->revision == 0xC7)) 1432 amdgpu_discovery_read_harvest_bit_per_ip(adev, 1433 &vcn_harvest_count); 1434 } else { 1435 amdgpu_discovery_read_from_harvest_table(adev, 1436 &vcn_harvest_count, 1437 &umc_harvest_count); 1438 } 1439 1440 amdgpu_discovery_harvest_config_quirk(adev); 1441 1442 if (vcn_harvest_count == adev->vcn.num_vcn_inst) { 1443 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; 1444 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; 1445 } 1446 1447 if (umc_harvest_count < adev->gmc.num_umc) { 1448 adev->gmc.num_umc -= umc_harvest_count; 1449 } 1450 } 1451 1452 union gc_info { 1453 struct gc_info_v1_0 v1; 1454 struct gc_info_v1_1 v1_1; 1455 struct gc_info_v1_2 v1_2; 1456 struct gc_info_v2_0 v2; 1457 struct gc_info_v2_1 v2_1; 1458 }; 1459 1460 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) 1461 { 1462 struct binary_header *bhdr; 1463 union gc_info *gc_info; 1464 u16 offset; 1465 1466 if (!adev->mman.discovery_bin) { 1467 DRM_ERROR("ip discovery uninitialized\n"); 1468 return -EINVAL; 1469 } 1470 1471 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1472 offset = le16_to_cpu(bhdr->table_list[GC].offset); 1473 1474 if (!offset) 1475 return 0; 1476 1477 gc_info = (union gc_info *)(adev->mman.discovery_bin + offset); 1478 1479 switch (le16_to_cpu(gc_info->v1.header.version_major)) { 1480 case 1: 1481 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se); 1482 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) + 1483 le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa)); 1484 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se); 1485 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se); 1486 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c); 1487 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs); 1488 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds); 1489 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth); 1490 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth); 1491 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer); 1492 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size); 1493 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd); 1494 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu); 1495 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size); 1496 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) / 1497 le32_to_cpu(gc_info->v1.gc_num_sa_per_se); 1498 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc); 1499 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) { 1500 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa); 1501 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface); 1502 adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps); 1503 } 1504 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) { 1505 adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg); 1506 adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size); 1507 adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp); 1508 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc); 1509 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc); 1510 adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa); 1511 adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance); 1512 adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu); 1513 } 1514 break; 1515 case 2: 1516 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se); 1517 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh); 1518 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se); 1519 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se); 1520 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs); 1521 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs); 1522 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds); 1523 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth); 1524 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth); 1525 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer); 1526 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size); 1527 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd); 1528 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu); 1529 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size); 1530 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) / 1531 le32_to_cpu(gc_info->v2.gc_num_sh_per_se); 1532 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc); 1533 if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) { 1534 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh); 1535 adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu); 1536 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */ 1537 adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc); 1538 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc); 1539 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc); 1540 adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */ 1541 } 1542 break; 1543 default: 1544 dev_err(adev->dev, 1545 "Unhandled GC info table %d.%d\n", 1546 le16_to_cpu(gc_info->v1.header.version_major), 1547 le16_to_cpu(gc_info->v1.header.version_minor)); 1548 return -EINVAL; 1549 } 1550 return 0; 1551 } 1552 1553 union mall_info { 1554 struct mall_info_v1_0 v1; 1555 struct mall_info_v2_0 v2; 1556 }; 1557 1558 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev) 1559 { 1560 struct binary_header *bhdr; 1561 union mall_info *mall_info; 1562 u32 u, mall_size_per_umc, m_s_present, half_use; 1563 u64 mall_size; 1564 u16 offset; 1565 1566 if (!adev->mman.discovery_bin) { 1567 DRM_ERROR("ip discovery uninitialized\n"); 1568 return -EINVAL; 1569 } 1570 1571 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1572 offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset); 1573 1574 if (!offset) 1575 return 0; 1576 1577 mall_info = (union mall_info *)(adev->mman.discovery_bin + offset); 1578 1579 switch (le16_to_cpu(mall_info->v1.header.version_major)) { 1580 case 1: 1581 mall_size = 0; 1582 mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m); 1583 m_s_present = le32_to_cpu(mall_info->v1.m_s_present); 1584 half_use = le32_to_cpu(mall_info->v1.m_half_use); 1585 for (u = 0; u < adev->gmc.num_umc; u++) { 1586 if (m_s_present & (1 << u)) 1587 mall_size += mall_size_per_umc * 2; 1588 else if (half_use & (1 << u)) 1589 mall_size += mall_size_per_umc / 2; 1590 else 1591 mall_size += mall_size_per_umc; 1592 } 1593 adev->gmc.mall_size = mall_size; 1594 adev->gmc.m_half_use = half_use; 1595 break; 1596 case 2: 1597 mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc); 1598 adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc; 1599 break; 1600 default: 1601 dev_err(adev->dev, 1602 "Unhandled MALL info table %d.%d\n", 1603 le16_to_cpu(mall_info->v1.header.version_major), 1604 le16_to_cpu(mall_info->v1.header.version_minor)); 1605 return -EINVAL; 1606 } 1607 return 0; 1608 } 1609 1610 union vcn_info { 1611 struct vcn_info_v1_0 v1; 1612 }; 1613 1614 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) 1615 { 1616 struct binary_header *bhdr; 1617 union vcn_info *vcn_info; 1618 u16 offset; 1619 int v; 1620 1621 if (!adev->mman.discovery_bin) { 1622 DRM_ERROR("ip discovery uninitialized\n"); 1623 return -EINVAL; 1624 } 1625 1626 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES 1627 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES 1628 * but that may change in the future with new GPUs so keep this 1629 * check for defensive purposes. 1630 */ 1631 if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) { 1632 dev_err(adev->dev, "invalid vcn instances\n"); 1633 return -EINVAL; 1634 } 1635 1636 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1637 offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset); 1638 1639 if (!offset) 1640 return 0; 1641 1642 vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset); 1643 1644 switch (le16_to_cpu(vcn_info->v1.header.version_major)) { 1645 case 1: 1646 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES 1647 * so this won't overflow. 1648 */ 1649 for (v = 0; v < adev->vcn.num_vcn_inst; v++) { 1650 adev->vcn.vcn_codec_disable_mask[v] = 1651 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits); 1652 } 1653 break; 1654 default: 1655 dev_err(adev->dev, 1656 "Unhandled VCN info table %d.%d\n", 1657 le16_to_cpu(vcn_info->v1.header.version_major), 1658 le16_to_cpu(vcn_info->v1.header.version_minor)); 1659 return -EINVAL; 1660 } 1661 return 0; 1662 } 1663 1664 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) 1665 { 1666 /* what IP to use for this? */ 1667 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1668 case IP_VERSION(9, 0, 1): 1669 case IP_VERSION(9, 1, 0): 1670 case IP_VERSION(9, 2, 1): 1671 case IP_VERSION(9, 2, 2): 1672 case IP_VERSION(9, 3, 0): 1673 case IP_VERSION(9, 4, 0): 1674 case IP_VERSION(9, 4, 1): 1675 case IP_VERSION(9, 4, 2): 1676 case IP_VERSION(9, 4, 3): 1677 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); 1678 break; 1679 case IP_VERSION(10, 1, 10): 1680 case IP_VERSION(10, 1, 1): 1681 case IP_VERSION(10, 1, 2): 1682 case IP_VERSION(10, 1, 3): 1683 case IP_VERSION(10, 1, 4): 1684 case IP_VERSION(10, 3, 0): 1685 case IP_VERSION(10, 3, 1): 1686 case IP_VERSION(10, 3, 2): 1687 case IP_VERSION(10, 3, 3): 1688 case IP_VERSION(10, 3, 4): 1689 case IP_VERSION(10, 3, 5): 1690 case IP_VERSION(10, 3, 6): 1691 case IP_VERSION(10, 3, 7): 1692 amdgpu_device_ip_block_add(adev, &nv_common_ip_block); 1693 break; 1694 case IP_VERSION(11, 0, 0): 1695 case IP_VERSION(11, 0, 1): 1696 case IP_VERSION(11, 0, 2): 1697 case IP_VERSION(11, 0, 3): 1698 case IP_VERSION(11, 0, 4): 1699 case IP_VERSION(11, 5, 0): 1700 case IP_VERSION(11, 5, 1): 1701 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block); 1702 break; 1703 default: 1704 dev_err(adev->dev, 1705 "Failed to add common ip block(GC_HWIP:0x%x)\n", 1706 amdgpu_ip_version(adev, GC_HWIP, 0)); 1707 return -EINVAL; 1708 } 1709 return 0; 1710 } 1711 1712 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) 1713 { 1714 /* use GC or MMHUB IP version */ 1715 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1716 case IP_VERSION(9, 0, 1): 1717 case IP_VERSION(9, 1, 0): 1718 case IP_VERSION(9, 2, 1): 1719 case IP_VERSION(9, 2, 2): 1720 case IP_VERSION(9, 3, 0): 1721 case IP_VERSION(9, 4, 0): 1722 case IP_VERSION(9, 4, 1): 1723 case IP_VERSION(9, 4, 2): 1724 case IP_VERSION(9, 4, 3): 1725 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 1726 break; 1727 case IP_VERSION(10, 1, 10): 1728 case IP_VERSION(10, 1, 1): 1729 case IP_VERSION(10, 1, 2): 1730 case IP_VERSION(10, 1, 3): 1731 case IP_VERSION(10, 1, 4): 1732 case IP_VERSION(10, 3, 0): 1733 case IP_VERSION(10, 3, 1): 1734 case IP_VERSION(10, 3, 2): 1735 case IP_VERSION(10, 3, 3): 1736 case IP_VERSION(10, 3, 4): 1737 case IP_VERSION(10, 3, 5): 1738 case IP_VERSION(10, 3, 6): 1739 case IP_VERSION(10, 3, 7): 1740 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); 1741 break; 1742 case IP_VERSION(11, 0, 0): 1743 case IP_VERSION(11, 0, 1): 1744 case IP_VERSION(11, 0, 2): 1745 case IP_VERSION(11, 0, 3): 1746 case IP_VERSION(11, 0, 4): 1747 case IP_VERSION(11, 5, 0): 1748 case IP_VERSION(11, 5, 1): 1749 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block); 1750 break; 1751 default: 1752 dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n", 1753 amdgpu_ip_version(adev, GC_HWIP, 0)); 1754 return -EINVAL; 1755 } 1756 return 0; 1757 } 1758 1759 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) 1760 { 1761 switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) { 1762 case IP_VERSION(4, 0, 0): 1763 case IP_VERSION(4, 0, 1): 1764 case IP_VERSION(4, 1, 0): 1765 case IP_VERSION(4, 1, 1): 1766 case IP_VERSION(4, 3, 0): 1767 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 1768 break; 1769 case IP_VERSION(4, 2, 0): 1770 case IP_VERSION(4, 2, 1): 1771 case IP_VERSION(4, 4, 0): 1772 case IP_VERSION(4, 4, 2): 1773 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); 1774 break; 1775 case IP_VERSION(5, 0, 0): 1776 case IP_VERSION(5, 0, 1): 1777 case IP_VERSION(5, 0, 2): 1778 case IP_VERSION(5, 0, 3): 1779 case IP_VERSION(5, 2, 0): 1780 case IP_VERSION(5, 2, 1): 1781 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); 1782 break; 1783 case IP_VERSION(6, 0, 0): 1784 case IP_VERSION(6, 0, 1): 1785 case IP_VERSION(6, 0, 2): 1786 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block); 1787 break; 1788 case IP_VERSION(6, 1, 0): 1789 amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block); 1790 break; 1791 case IP_VERSION(7, 0, 0): 1792 amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block); 1793 break; 1794 default: 1795 dev_err(adev->dev, 1796 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", 1797 amdgpu_ip_version(adev, OSSSYS_HWIP, 0)); 1798 return -EINVAL; 1799 } 1800 return 0; 1801 } 1802 1803 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) 1804 { 1805 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 1806 case IP_VERSION(9, 0, 0): 1807 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); 1808 break; 1809 case IP_VERSION(10, 0, 0): 1810 case IP_VERSION(10, 0, 1): 1811 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); 1812 break; 1813 case IP_VERSION(11, 0, 0): 1814 case IP_VERSION(11, 0, 2): 1815 case IP_VERSION(11, 0, 4): 1816 case IP_VERSION(11, 0, 5): 1817 case IP_VERSION(11, 0, 9): 1818 case IP_VERSION(11, 0, 7): 1819 case IP_VERSION(11, 0, 11): 1820 case IP_VERSION(11, 0, 12): 1821 case IP_VERSION(11, 0, 13): 1822 case IP_VERSION(11, 5, 0): 1823 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); 1824 break; 1825 case IP_VERSION(11, 0, 8): 1826 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block); 1827 break; 1828 case IP_VERSION(11, 0, 3): 1829 case IP_VERSION(12, 0, 1): 1830 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); 1831 break; 1832 case IP_VERSION(13, 0, 0): 1833 case IP_VERSION(13, 0, 1): 1834 case IP_VERSION(13, 0, 2): 1835 case IP_VERSION(13, 0, 3): 1836 case IP_VERSION(13, 0, 5): 1837 case IP_VERSION(13, 0, 6): 1838 case IP_VERSION(13, 0, 7): 1839 case IP_VERSION(13, 0, 8): 1840 case IP_VERSION(13, 0, 10): 1841 case IP_VERSION(13, 0, 11): 1842 case IP_VERSION(14, 0, 0): 1843 case IP_VERSION(14, 0, 1): 1844 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); 1845 break; 1846 case IP_VERSION(13, 0, 4): 1847 amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block); 1848 break; 1849 case IP_VERSION(14, 0, 2): 1850 case IP_VERSION(14, 0, 3): 1851 amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block); 1852 break; 1853 default: 1854 dev_err(adev->dev, 1855 "Failed to add psp ip block(MP0_HWIP:0x%x)\n", 1856 amdgpu_ip_version(adev, MP0_HWIP, 0)); 1857 return -EINVAL; 1858 } 1859 return 0; 1860 } 1861 1862 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) 1863 { 1864 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1865 case IP_VERSION(9, 0, 0): 1866 case IP_VERSION(10, 0, 0): 1867 case IP_VERSION(10, 0, 1): 1868 case IP_VERSION(11, 0, 2): 1869 if (adev->asic_type == CHIP_ARCTURUS) 1870 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 1871 else 1872 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 1873 break; 1874 case IP_VERSION(11, 0, 0): 1875 case IP_VERSION(11, 0, 5): 1876 case IP_VERSION(11, 0, 9): 1877 case IP_VERSION(11, 0, 7): 1878 case IP_VERSION(11, 0, 8): 1879 case IP_VERSION(11, 0, 11): 1880 case IP_VERSION(11, 0, 12): 1881 case IP_VERSION(11, 0, 13): 1882 case IP_VERSION(11, 5, 0): 1883 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 1884 break; 1885 case IP_VERSION(12, 0, 0): 1886 case IP_VERSION(12, 0, 1): 1887 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); 1888 break; 1889 case IP_VERSION(13, 0, 0): 1890 case IP_VERSION(13, 0, 1): 1891 case IP_VERSION(13, 0, 2): 1892 case IP_VERSION(13, 0, 3): 1893 case IP_VERSION(13, 0, 4): 1894 case IP_VERSION(13, 0, 5): 1895 case IP_VERSION(13, 0, 6): 1896 case IP_VERSION(13, 0, 7): 1897 case IP_VERSION(13, 0, 8): 1898 case IP_VERSION(13, 0, 10): 1899 case IP_VERSION(13, 0, 11): 1900 amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); 1901 break; 1902 case IP_VERSION(14, 0, 0): 1903 case IP_VERSION(14, 0, 1): 1904 case IP_VERSION(14, 0, 2): 1905 case IP_VERSION(14, 0, 3): 1906 amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block); 1907 break; 1908 default: 1909 dev_err(adev->dev, 1910 "Failed to add smu ip block(MP1_HWIP:0x%x)\n", 1911 amdgpu_ip_version(adev, MP1_HWIP, 0)); 1912 return -EINVAL; 1913 } 1914 return 0; 1915 } 1916 1917 #if defined(CONFIG_DRM_AMD_DC) 1918 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev) 1919 { 1920 amdgpu_device_set_sriov_virtual_display(adev); 1921 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 1922 } 1923 #endif 1924 1925 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) 1926 { 1927 if (adev->enable_virtual_display) { 1928 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 1929 return 0; 1930 } 1931 1932 if (!amdgpu_device_has_dc_support(adev)) 1933 return 0; 1934 1935 #if defined(CONFIG_DRM_AMD_DC) 1936 if (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1937 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1938 case IP_VERSION(1, 0, 0): 1939 case IP_VERSION(1, 0, 1): 1940 case IP_VERSION(2, 0, 2): 1941 case IP_VERSION(2, 0, 0): 1942 case IP_VERSION(2, 0, 3): 1943 case IP_VERSION(2, 1, 0): 1944 case IP_VERSION(3, 0, 0): 1945 case IP_VERSION(3, 0, 2): 1946 case IP_VERSION(3, 0, 3): 1947 case IP_VERSION(3, 0, 1): 1948 case IP_VERSION(3, 1, 2): 1949 case IP_VERSION(3, 1, 3): 1950 case IP_VERSION(3, 1, 4): 1951 case IP_VERSION(3, 1, 5): 1952 case IP_VERSION(3, 1, 6): 1953 case IP_VERSION(3, 2, 0): 1954 case IP_VERSION(3, 2, 1): 1955 case IP_VERSION(3, 5, 0): 1956 case IP_VERSION(3, 5, 1): 1957 if (amdgpu_sriov_vf(adev)) 1958 amdgpu_discovery_set_sriov_display(adev); 1959 else 1960 amdgpu_device_ip_block_add(adev, &dm_ip_block); 1961 break; 1962 default: 1963 dev_err(adev->dev, 1964 "Failed to add dm ip block(DCE_HWIP:0x%x)\n", 1965 amdgpu_ip_version(adev, DCE_HWIP, 0)); 1966 return -EINVAL; 1967 } 1968 } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) { 1969 switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) { 1970 case IP_VERSION(12, 0, 0): 1971 case IP_VERSION(12, 0, 1): 1972 case IP_VERSION(12, 1, 0): 1973 if (amdgpu_sriov_vf(adev)) 1974 amdgpu_discovery_set_sriov_display(adev); 1975 else 1976 amdgpu_device_ip_block_add(adev, &dm_ip_block); 1977 break; 1978 default: 1979 dev_err(adev->dev, 1980 "Failed to add dm ip block(DCI_HWIP:0x%x)\n", 1981 amdgpu_ip_version(adev, DCI_HWIP, 0)); 1982 return -EINVAL; 1983 } 1984 } 1985 #endif 1986 return 0; 1987 } 1988 1989 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) 1990 { 1991 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1992 case IP_VERSION(9, 0, 1): 1993 case IP_VERSION(9, 1, 0): 1994 case IP_VERSION(9, 2, 1): 1995 case IP_VERSION(9, 2, 2): 1996 case IP_VERSION(9, 3, 0): 1997 case IP_VERSION(9, 4, 0): 1998 case IP_VERSION(9, 4, 1): 1999 case IP_VERSION(9, 4, 2): 2000 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); 2001 break; 2002 case IP_VERSION(9, 4, 3): 2003 amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block); 2004 break; 2005 case IP_VERSION(10, 1, 10): 2006 case IP_VERSION(10, 1, 2): 2007 case IP_VERSION(10, 1, 1): 2008 case IP_VERSION(10, 1, 3): 2009 case IP_VERSION(10, 1, 4): 2010 case IP_VERSION(10, 3, 0): 2011 case IP_VERSION(10, 3, 2): 2012 case IP_VERSION(10, 3, 1): 2013 case IP_VERSION(10, 3, 4): 2014 case IP_VERSION(10, 3, 5): 2015 case IP_VERSION(10, 3, 6): 2016 case IP_VERSION(10, 3, 3): 2017 case IP_VERSION(10, 3, 7): 2018 amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); 2019 break; 2020 case IP_VERSION(11, 0, 0): 2021 case IP_VERSION(11, 0, 1): 2022 case IP_VERSION(11, 0, 2): 2023 case IP_VERSION(11, 0, 3): 2024 case IP_VERSION(11, 0, 4): 2025 case IP_VERSION(11, 5, 0): 2026 case IP_VERSION(11, 5, 1): 2027 amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block); 2028 break; 2029 default: 2030 dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n", 2031 amdgpu_ip_version(adev, GC_HWIP, 0)); 2032 return -EINVAL; 2033 } 2034 return 0; 2035 } 2036 2037 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) 2038 { 2039 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 2040 case IP_VERSION(4, 0, 0): 2041 case IP_VERSION(4, 0, 1): 2042 case IP_VERSION(4, 1, 0): 2043 case IP_VERSION(4, 1, 1): 2044 case IP_VERSION(4, 1, 2): 2045 case IP_VERSION(4, 2, 0): 2046 case IP_VERSION(4, 2, 2): 2047 case IP_VERSION(4, 4, 0): 2048 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); 2049 break; 2050 case IP_VERSION(4, 4, 2): 2051 amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block); 2052 break; 2053 case IP_VERSION(5, 0, 0): 2054 case IP_VERSION(5, 0, 1): 2055 case IP_VERSION(5, 0, 2): 2056 case IP_VERSION(5, 0, 5): 2057 amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); 2058 break; 2059 case IP_VERSION(5, 2, 0): 2060 case IP_VERSION(5, 2, 2): 2061 case IP_VERSION(5, 2, 4): 2062 case IP_VERSION(5, 2, 5): 2063 case IP_VERSION(5, 2, 6): 2064 case IP_VERSION(5, 2, 3): 2065 case IP_VERSION(5, 2, 1): 2066 case IP_VERSION(5, 2, 7): 2067 amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); 2068 break; 2069 case IP_VERSION(6, 0, 0): 2070 case IP_VERSION(6, 0, 1): 2071 case IP_VERSION(6, 0, 2): 2072 case IP_VERSION(6, 0, 3): 2073 case IP_VERSION(6, 1, 0): 2074 case IP_VERSION(6, 1, 1): 2075 amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block); 2076 break; 2077 default: 2078 dev_err(adev->dev, 2079 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", 2080 amdgpu_ip_version(adev, SDMA0_HWIP, 0)); 2081 return -EINVAL; 2082 } 2083 return 0; 2084 } 2085 2086 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) 2087 { 2088 if (amdgpu_ip_version(adev, VCE_HWIP, 0)) { 2089 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 2090 case IP_VERSION(7, 0, 0): 2091 case IP_VERSION(7, 2, 0): 2092 /* UVD is not supported on vega20 SR-IOV */ 2093 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 2094 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); 2095 break; 2096 default: 2097 dev_err(adev->dev, 2098 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", 2099 amdgpu_ip_version(adev, UVD_HWIP, 0)); 2100 return -EINVAL; 2101 } 2102 switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) { 2103 case IP_VERSION(4, 0, 0): 2104 case IP_VERSION(4, 1, 0): 2105 /* VCE is not supported on vega20 SR-IOV */ 2106 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 2107 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); 2108 break; 2109 default: 2110 dev_err(adev->dev, 2111 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", 2112 amdgpu_ip_version(adev, VCE_HWIP, 0)); 2113 return -EINVAL; 2114 } 2115 } else { 2116 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 2117 case IP_VERSION(1, 0, 0): 2118 case IP_VERSION(1, 0, 1): 2119 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); 2120 break; 2121 case IP_VERSION(2, 0, 0): 2122 case IP_VERSION(2, 0, 2): 2123 case IP_VERSION(2, 2, 0): 2124 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); 2125 if (!amdgpu_sriov_vf(adev)) 2126 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); 2127 break; 2128 case IP_VERSION(2, 0, 3): 2129 break; 2130 case IP_VERSION(2, 5, 0): 2131 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); 2132 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); 2133 break; 2134 case IP_VERSION(2, 6, 0): 2135 amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block); 2136 amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block); 2137 break; 2138 case IP_VERSION(3, 0, 0): 2139 case IP_VERSION(3, 0, 16): 2140 case IP_VERSION(3, 1, 1): 2141 case IP_VERSION(3, 1, 2): 2142 case IP_VERSION(3, 0, 2): 2143 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 2144 if (!amdgpu_sriov_vf(adev)) 2145 amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); 2146 break; 2147 case IP_VERSION(3, 0, 33): 2148 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 2149 break; 2150 case IP_VERSION(4, 0, 0): 2151 case IP_VERSION(4, 0, 2): 2152 case IP_VERSION(4, 0, 4): 2153 amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block); 2154 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block); 2155 break; 2156 case IP_VERSION(4, 0, 3): 2157 amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block); 2158 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block); 2159 break; 2160 case IP_VERSION(4, 0, 5): 2161 case IP_VERSION(4, 0, 6): 2162 amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block); 2163 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block); 2164 break; 2165 case IP_VERSION(5, 0, 0): 2166 amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block); 2167 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block); 2168 break; 2169 default: 2170 dev_err(adev->dev, 2171 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", 2172 amdgpu_ip_version(adev, UVD_HWIP, 0)); 2173 return -EINVAL; 2174 } 2175 } 2176 return 0; 2177 } 2178 2179 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) 2180 { 2181 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2182 case IP_VERSION(10, 1, 10): 2183 case IP_VERSION(10, 1, 1): 2184 case IP_VERSION(10, 1, 2): 2185 case IP_VERSION(10, 1, 3): 2186 case IP_VERSION(10, 1, 4): 2187 case IP_VERSION(10, 3, 0): 2188 case IP_VERSION(10, 3, 1): 2189 case IP_VERSION(10, 3, 2): 2190 case IP_VERSION(10, 3, 3): 2191 case IP_VERSION(10, 3, 4): 2192 case IP_VERSION(10, 3, 5): 2193 case IP_VERSION(10, 3, 6): 2194 if (amdgpu_mes) { 2195 amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); 2196 adev->enable_mes = true; 2197 if (amdgpu_mes_kiq) 2198 adev->enable_mes_kiq = true; 2199 } 2200 break; 2201 case IP_VERSION(11, 0, 0): 2202 case IP_VERSION(11, 0, 1): 2203 case IP_VERSION(11, 0, 2): 2204 case IP_VERSION(11, 0, 3): 2205 case IP_VERSION(11, 0, 4): 2206 case IP_VERSION(11, 5, 0): 2207 case IP_VERSION(11, 5, 1): 2208 amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block); 2209 adev->enable_mes = true; 2210 adev->enable_mes_kiq = true; 2211 break; 2212 default: 2213 break; 2214 } 2215 return 0; 2216 } 2217 2218 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev) 2219 { 2220 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2221 case IP_VERSION(9, 4, 3): 2222 aqua_vanjaram_init_soc_config(adev); 2223 break; 2224 default: 2225 break; 2226 } 2227 } 2228 2229 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev) 2230 { 2231 switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { 2232 case IP_VERSION(6, 1, 0): 2233 case IP_VERSION(6, 1, 1): 2234 amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block); 2235 break; 2236 default: 2237 break; 2238 } 2239 2240 return 0; 2241 } 2242 2243 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev) 2244 { 2245 switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { 2246 case IP_VERSION(4, 0, 5): 2247 case IP_VERSION(4, 0, 6): 2248 if (amdgpu_umsch_mm & 0x1) { 2249 amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block); 2250 adev->enable_umsch_mm = true; 2251 } 2252 break; 2253 default: 2254 break; 2255 } 2256 2257 return 0; 2258 } 2259 2260 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) 2261 { 2262 int r; 2263 2264 switch (adev->asic_type) { 2265 case CHIP_VEGA10: 2266 vega10_reg_base_init(adev); 2267 adev->sdma.num_instances = 2; 2268 adev->gmc.num_umc = 4; 2269 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0); 2270 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0); 2271 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); 2272 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0); 2273 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0); 2274 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0); 2275 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); 2276 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0); 2277 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0); 2278 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); 2279 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); 2280 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); 2281 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0); 2282 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1); 2283 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); 2284 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); 2285 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0); 2286 break; 2287 case CHIP_VEGA12: 2288 vega10_reg_base_init(adev); 2289 adev->sdma.num_instances = 2; 2290 adev->gmc.num_umc = 4; 2291 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0); 2292 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0); 2293 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); 2294 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1); 2295 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1); 2296 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1); 2297 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0); 2298 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0); 2299 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0); 2300 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); 2301 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); 2302 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); 2303 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1); 2304 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1); 2305 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); 2306 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); 2307 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1); 2308 break; 2309 case CHIP_RAVEN: 2310 vega10_reg_base_init(adev); 2311 adev->sdma.num_instances = 1; 2312 adev->vcn.num_vcn_inst = 1; 2313 adev->gmc.num_umc = 2; 2314 if (adev->apu_flags & AMD_APU_IS_RAVEN2) { 2315 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0); 2316 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); 2317 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1); 2318 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1); 2319 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1); 2320 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1); 2321 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1); 2322 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0); 2323 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1); 2324 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1); 2325 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0); 2326 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1); 2327 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2); 2328 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1); 2329 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1); 2330 } else { 2331 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0); 2332 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0); 2333 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0); 2334 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0); 2335 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0); 2336 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); 2337 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0); 2338 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0); 2339 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0); 2340 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0); 2341 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0); 2342 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0); 2343 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0); 2344 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0); 2345 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0); 2346 } 2347 break; 2348 case CHIP_VEGA20: 2349 vega20_reg_base_init(adev); 2350 adev->sdma.num_instances = 2; 2351 adev->gmc.num_umc = 8; 2352 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0); 2353 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0); 2354 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); 2355 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0); 2356 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0); 2357 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0); 2358 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0); 2359 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0); 2360 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1); 2361 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2); 2362 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2363 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2); 2364 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2); 2365 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0); 2366 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0); 2367 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0); 2368 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0); 2369 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0); 2370 break; 2371 case CHIP_ARCTURUS: 2372 arct_reg_base_init(adev); 2373 adev->sdma.num_instances = 8; 2374 adev->vcn.num_vcn_inst = 2; 2375 adev->gmc.num_umc = 8; 2376 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1); 2377 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1); 2378 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); 2379 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1); 2380 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2); 2381 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2); 2382 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2); 2383 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2); 2384 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2); 2385 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2); 2386 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2); 2387 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2); 2388 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1); 2389 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1); 2390 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2); 2391 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4); 2392 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2393 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3); 2394 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3); 2395 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1); 2396 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0); 2397 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0); 2398 break; 2399 case CHIP_ALDEBARAN: 2400 aldebaran_reg_base_init(adev); 2401 adev->sdma.num_instances = 5; 2402 adev->vcn.num_vcn_inst = 2; 2403 adev->gmc.num_umc = 4; 2404 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2405 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2406 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); 2407 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0); 2408 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0); 2409 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0); 2410 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0); 2411 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0); 2412 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0); 2413 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2); 2414 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4); 2415 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0); 2416 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2); 2417 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2); 2418 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2); 2419 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2); 2420 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2); 2421 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0); 2422 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0); 2423 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); 2424 break; 2425 default: 2426 r = amdgpu_discovery_reg_base_init(adev); 2427 if (r) 2428 return -EINVAL; 2429 2430 amdgpu_discovery_harvest_ip(adev); 2431 amdgpu_discovery_get_gfx_info(adev); 2432 amdgpu_discovery_get_mall_info(adev); 2433 amdgpu_discovery_get_vcn_info(adev); 2434 break; 2435 } 2436 2437 amdgpu_discovery_init_soc_config(adev); 2438 amdgpu_discovery_sysfs_init(adev); 2439 2440 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2441 case IP_VERSION(9, 0, 1): 2442 case IP_VERSION(9, 2, 1): 2443 case IP_VERSION(9, 4, 0): 2444 case IP_VERSION(9, 4, 1): 2445 case IP_VERSION(9, 4, 2): 2446 case IP_VERSION(9, 4, 3): 2447 adev->family = AMDGPU_FAMILY_AI; 2448 break; 2449 case IP_VERSION(9, 1, 0): 2450 case IP_VERSION(9, 2, 2): 2451 case IP_VERSION(9, 3, 0): 2452 adev->family = AMDGPU_FAMILY_RV; 2453 break; 2454 case IP_VERSION(10, 1, 10): 2455 case IP_VERSION(10, 1, 1): 2456 case IP_VERSION(10, 1, 2): 2457 case IP_VERSION(10, 1, 3): 2458 case IP_VERSION(10, 1, 4): 2459 case IP_VERSION(10, 3, 0): 2460 case IP_VERSION(10, 3, 2): 2461 case IP_VERSION(10, 3, 4): 2462 case IP_VERSION(10, 3, 5): 2463 adev->family = AMDGPU_FAMILY_NV; 2464 break; 2465 case IP_VERSION(10, 3, 1): 2466 adev->family = AMDGPU_FAMILY_VGH; 2467 adev->apu_flags |= AMD_APU_IS_VANGOGH; 2468 break; 2469 case IP_VERSION(10, 3, 3): 2470 adev->family = AMDGPU_FAMILY_YC; 2471 break; 2472 case IP_VERSION(10, 3, 6): 2473 adev->family = AMDGPU_FAMILY_GC_10_3_6; 2474 break; 2475 case IP_VERSION(10, 3, 7): 2476 adev->family = AMDGPU_FAMILY_GC_10_3_7; 2477 break; 2478 case IP_VERSION(11, 0, 0): 2479 case IP_VERSION(11, 0, 2): 2480 case IP_VERSION(11, 0, 3): 2481 adev->family = AMDGPU_FAMILY_GC_11_0_0; 2482 break; 2483 case IP_VERSION(11, 0, 1): 2484 case IP_VERSION(11, 0, 4): 2485 adev->family = AMDGPU_FAMILY_GC_11_0_1; 2486 break; 2487 case IP_VERSION(11, 5, 0): 2488 case IP_VERSION(11, 5, 1): 2489 adev->family = AMDGPU_FAMILY_GC_11_5_0; 2490 break; 2491 default: 2492 return -EINVAL; 2493 } 2494 2495 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2496 case IP_VERSION(9, 1, 0): 2497 case IP_VERSION(9, 2, 2): 2498 case IP_VERSION(9, 3, 0): 2499 case IP_VERSION(10, 1, 3): 2500 case IP_VERSION(10, 1, 4): 2501 case IP_VERSION(10, 3, 1): 2502 case IP_VERSION(10, 3, 3): 2503 case IP_VERSION(10, 3, 6): 2504 case IP_VERSION(10, 3, 7): 2505 case IP_VERSION(11, 0, 1): 2506 case IP_VERSION(11, 0, 4): 2507 case IP_VERSION(11, 5, 0): 2508 case IP_VERSION(11, 5, 1): 2509 adev->flags |= AMD_IS_APU; 2510 break; 2511 default: 2512 break; 2513 } 2514 2515 if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0)) 2516 adev->gmc.xgmi.supported = true; 2517 2518 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) 2519 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0); 2520 2521 /* set NBIO version */ 2522 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 2523 case IP_VERSION(6, 1, 0): 2524 case IP_VERSION(6, 2, 0): 2525 adev->nbio.funcs = &nbio_v6_1_funcs; 2526 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; 2527 break; 2528 case IP_VERSION(7, 0, 0): 2529 case IP_VERSION(7, 0, 1): 2530 case IP_VERSION(2, 5, 0): 2531 adev->nbio.funcs = &nbio_v7_0_funcs; 2532 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; 2533 break; 2534 case IP_VERSION(7, 4, 0): 2535 case IP_VERSION(7, 4, 1): 2536 case IP_VERSION(7, 4, 4): 2537 adev->nbio.funcs = &nbio_v7_4_funcs; 2538 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; 2539 break; 2540 case IP_VERSION(7, 9, 0): 2541 adev->nbio.funcs = &nbio_v7_9_funcs; 2542 adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg; 2543 break; 2544 case IP_VERSION(7, 11, 0): 2545 case IP_VERSION(7, 11, 1): 2546 adev->nbio.funcs = &nbio_v7_11_funcs; 2547 adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg; 2548 break; 2549 case IP_VERSION(7, 2, 0): 2550 case IP_VERSION(7, 2, 1): 2551 case IP_VERSION(7, 3, 0): 2552 case IP_VERSION(7, 5, 0): 2553 case IP_VERSION(7, 5, 1): 2554 adev->nbio.funcs = &nbio_v7_2_funcs; 2555 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg; 2556 break; 2557 case IP_VERSION(2, 1, 1): 2558 case IP_VERSION(2, 3, 0): 2559 case IP_VERSION(2, 3, 1): 2560 case IP_VERSION(2, 3, 2): 2561 case IP_VERSION(3, 3, 0): 2562 case IP_VERSION(3, 3, 1): 2563 case IP_VERSION(3, 3, 2): 2564 case IP_VERSION(3, 3, 3): 2565 adev->nbio.funcs = &nbio_v2_3_funcs; 2566 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; 2567 break; 2568 case IP_VERSION(4, 3, 0): 2569 case IP_VERSION(4, 3, 1): 2570 if (amdgpu_sriov_vf(adev)) 2571 adev->nbio.funcs = &nbio_v4_3_sriov_funcs; 2572 else 2573 adev->nbio.funcs = &nbio_v4_3_funcs; 2574 adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg; 2575 break; 2576 case IP_VERSION(7, 7, 0): 2577 case IP_VERSION(7, 7, 1): 2578 adev->nbio.funcs = &nbio_v7_7_funcs; 2579 adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg; 2580 break; 2581 case IP_VERSION(6, 3, 1): 2582 adev->nbio.funcs = &nbif_v6_3_1_funcs; 2583 adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg; 2584 break; 2585 default: 2586 break; 2587 } 2588 2589 switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) { 2590 case IP_VERSION(4, 0, 0): 2591 case IP_VERSION(4, 0, 1): 2592 case IP_VERSION(4, 1, 0): 2593 case IP_VERSION(4, 1, 1): 2594 case IP_VERSION(4, 1, 2): 2595 case IP_VERSION(4, 2, 0): 2596 case IP_VERSION(4, 2, 1): 2597 case IP_VERSION(4, 4, 0): 2598 case IP_VERSION(4, 4, 2): 2599 adev->hdp.funcs = &hdp_v4_0_funcs; 2600 break; 2601 case IP_VERSION(5, 0, 0): 2602 case IP_VERSION(5, 0, 1): 2603 case IP_VERSION(5, 0, 2): 2604 case IP_VERSION(5, 0, 3): 2605 case IP_VERSION(5, 0, 4): 2606 case IP_VERSION(5, 2, 0): 2607 adev->hdp.funcs = &hdp_v5_0_funcs; 2608 break; 2609 case IP_VERSION(5, 2, 1): 2610 adev->hdp.funcs = &hdp_v5_2_funcs; 2611 break; 2612 case IP_VERSION(6, 0, 0): 2613 case IP_VERSION(6, 0, 1): 2614 case IP_VERSION(6, 1, 0): 2615 adev->hdp.funcs = &hdp_v6_0_funcs; 2616 break; 2617 case IP_VERSION(7, 0, 0): 2618 adev->hdp.funcs = &hdp_v7_0_funcs; 2619 break; 2620 default: 2621 break; 2622 } 2623 2624 switch (amdgpu_ip_version(adev, DF_HWIP, 0)) { 2625 case IP_VERSION(3, 6, 0): 2626 case IP_VERSION(3, 6, 1): 2627 case IP_VERSION(3, 6, 2): 2628 adev->df.funcs = &df_v3_6_funcs; 2629 break; 2630 case IP_VERSION(2, 1, 0): 2631 case IP_VERSION(2, 1, 1): 2632 case IP_VERSION(2, 5, 0): 2633 case IP_VERSION(3, 5, 1): 2634 case IP_VERSION(3, 5, 2): 2635 adev->df.funcs = &df_v1_7_funcs; 2636 break; 2637 case IP_VERSION(4, 3, 0): 2638 adev->df.funcs = &df_v4_3_funcs; 2639 break; 2640 case IP_VERSION(4, 6, 2): 2641 adev->df.funcs = &df_v4_6_2_funcs; 2642 break; 2643 default: 2644 break; 2645 } 2646 2647 switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) { 2648 case IP_VERSION(9, 0, 0): 2649 case IP_VERSION(9, 0, 1): 2650 case IP_VERSION(10, 0, 0): 2651 case IP_VERSION(10, 0, 1): 2652 case IP_VERSION(10, 0, 2): 2653 adev->smuio.funcs = &smuio_v9_0_funcs; 2654 break; 2655 case IP_VERSION(11, 0, 0): 2656 case IP_VERSION(11, 0, 2): 2657 case IP_VERSION(11, 0, 3): 2658 case IP_VERSION(11, 0, 4): 2659 case IP_VERSION(11, 0, 7): 2660 case IP_VERSION(11, 0, 8): 2661 adev->smuio.funcs = &smuio_v11_0_funcs; 2662 break; 2663 case IP_VERSION(11, 0, 6): 2664 case IP_VERSION(11, 0, 10): 2665 case IP_VERSION(11, 0, 11): 2666 case IP_VERSION(11, 5, 0): 2667 case IP_VERSION(13, 0, 1): 2668 case IP_VERSION(13, 0, 9): 2669 case IP_VERSION(13, 0, 10): 2670 adev->smuio.funcs = &smuio_v11_0_6_funcs; 2671 break; 2672 case IP_VERSION(13, 0, 2): 2673 adev->smuio.funcs = &smuio_v13_0_funcs; 2674 break; 2675 case IP_VERSION(13, 0, 3): 2676 adev->smuio.funcs = &smuio_v13_0_3_funcs; 2677 if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) { 2678 adev->flags |= AMD_IS_APU; 2679 } 2680 break; 2681 case IP_VERSION(13, 0, 6): 2682 case IP_VERSION(13, 0, 8): 2683 case IP_VERSION(14, 0, 0): 2684 case IP_VERSION(14, 0, 1): 2685 adev->smuio.funcs = &smuio_v13_0_6_funcs; 2686 break; 2687 case IP_VERSION(14, 0, 2): 2688 adev->smuio.funcs = &smuio_v14_0_2_funcs; 2689 break; 2690 default: 2691 break; 2692 } 2693 2694 switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) { 2695 case IP_VERSION(6, 0, 0): 2696 case IP_VERSION(6, 0, 1): 2697 case IP_VERSION(6, 0, 2): 2698 case IP_VERSION(6, 0, 3): 2699 adev->lsdma.funcs = &lsdma_v6_0_funcs; 2700 break; 2701 case IP_VERSION(7, 0, 0): 2702 case IP_VERSION(7, 0, 1): 2703 adev->lsdma.funcs = &lsdma_v7_0_funcs; 2704 break; 2705 default: 2706 break; 2707 } 2708 2709 r = amdgpu_discovery_set_common_ip_blocks(adev); 2710 if (r) 2711 return r; 2712 2713 r = amdgpu_discovery_set_gmc_ip_blocks(adev); 2714 if (r) 2715 return r; 2716 2717 /* For SR-IOV, PSP needs to be initialized before IH */ 2718 if (amdgpu_sriov_vf(adev)) { 2719 r = amdgpu_discovery_set_psp_ip_blocks(adev); 2720 if (r) 2721 return r; 2722 r = amdgpu_discovery_set_ih_ip_blocks(adev); 2723 if (r) 2724 return r; 2725 } else { 2726 r = amdgpu_discovery_set_ih_ip_blocks(adev); 2727 if (r) 2728 return r; 2729 2730 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 2731 r = amdgpu_discovery_set_psp_ip_blocks(adev); 2732 if (r) 2733 return r; 2734 } 2735 } 2736 2737 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 2738 r = amdgpu_discovery_set_smu_ip_blocks(adev); 2739 if (r) 2740 return r; 2741 } 2742 2743 r = amdgpu_discovery_set_display_ip_blocks(adev); 2744 if (r) 2745 return r; 2746 2747 r = amdgpu_discovery_set_gc_ip_blocks(adev); 2748 if (r) 2749 return r; 2750 2751 r = amdgpu_discovery_set_sdma_ip_blocks(adev); 2752 if (r) 2753 return r; 2754 2755 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && 2756 !amdgpu_sriov_vf(adev)) || 2757 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { 2758 r = amdgpu_discovery_set_smu_ip_blocks(adev); 2759 if (r) 2760 return r; 2761 } 2762 2763 r = amdgpu_discovery_set_mm_ip_blocks(adev); 2764 if (r) 2765 return r; 2766 2767 r = amdgpu_discovery_set_mes_ip_blocks(adev); 2768 if (r) 2769 return r; 2770 2771 r = amdgpu_discovery_set_vpe_ip_blocks(adev); 2772 if (r) 2773 return r; 2774 2775 r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev); 2776 if (r) 2777 return r; 2778 2779 return 0; 2780 } 2781 2782