/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "nv.h"
#include "soc21.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "amdgpu_vkms.h"
#include "mes_v10_1.h"
#include "mes_v11_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_6.h"

#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);

#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1
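/* Human-readable names for the HW IDs found in the discovery table,
 * indexed by HW ID.  Used for the per-die sysfs links and for debug
 * output while walking the IP list; hw_id_map below translates the
 * driver's HWIP indices into these discovery-table HW IDs.
 */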
static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID]		= "MP1",
	[MP2_HWID]		= "MP2",
	[THM_HWID]		= "THM",
	[SMUIO_HWID]		= "SMUIO",
	[FUSE_HWID]		= "FUSE",
	[CLKA_HWID]		= "CLKA",
	[PWR_HWID]		= "PWR",
	[GC_HWID]		= "GC",
	[UVD_HWID]		= "UVD",
	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
	[ACP_HWID]		= "ACP",
	[DCI_HWID]		= "DCI",
	[DMU_HWID]		= "DMU",
	[DCO_HWID]		= "DCO",
	[DIO_HWID]		= "DIO",
	[XDMA_HWID]		= "XDMA",
	[DCEAZ_HWID]		= "DCEAZ",
	[DAZ_HWID]		= "DAZ",
	[SDPMUX_HWID]		= "SDPMUX",
	[NTB_HWID]		= "NTB",
	[IOHC_HWID]		= "IOHC",
	[L2IMU_HWID]		= "L2IMU",
	[VCE_HWID]		= "VCE",
	[MMHUB_HWID]		= "MMHUB",
	[ATHUB_HWID]		= "ATHUB",
	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
	[DFX_HWID]		= "DFX",
	[DBGU0_HWID]		= "DBGU0",
	[DBGU1_HWID]		= "DBGU1",
	[OSSSYS_HWID]		= "OSSSYS",
	[HDP_HWID]		= "HDP",
	[SDMA0_HWID]		= "SDMA0",
	[SDMA1_HWID]		= "SDMA1",
	[SDMA2_HWID]		= "SDMA2",
	[SDMA3_HWID]		= "SDMA3",
	[ISP_HWID]		= "ISP",
	[DBGU_IO_HWID]		= "DBGU_IO",
	[DF_HWID]		= "DF",
	[CLKB_HWID]		= "CLKB",
	[FCH_HWID]		= "FCH",
	[DFX_DAP_HWID]		= "DFX_DAP",
	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
	[L1IMU3_HWID]		= "L1IMU3",
	[L1IMU4_HWID]		= "L1IMU4",
	[L1IMU5_HWID]		= "L1IMU5",
	[L1IMU6_HWID]		= "L1IMU6",
	[L1IMU7_HWID]		= "L1IMU7",
	[L1IMU8_HWID]		= "L1IMU8",
	[L1IMU9_HWID]		= "L1IMU9",
	[L1IMU10_HWID]		= "L1IMU10",
	[L1IMU11_HWID]		= "L1IMU11",
	[L1IMU12_HWID]		= "L1IMU12",
	[L1IMU13_HWID]		= "L1IMU13",
	[L1IMU14_HWID]		= "L1IMU14",
	[L1IMU15_HWID]		= "L1IMU15",
	[WAFLC_HWID]		= "WAFLC",
	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
	[PCIE_HWID]		= "PCIE",
	[PCS_HWID]		= "PCS",
	[DDCL_HWID]		= "DDCL",
	[SST_HWID]		= "SST",
	[IOAGR_HWID]		= "IOAGR",
	[NBIF_HWID]		= "NBIF",
	[IOAPIC_HWID]		= "IOAPIC",
	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
	[NTBCCP_HWID]		= "NTBCCP",
	[UMC_HWID]		= "UMC",
	[SATA_HWID]		= "SATA",
	[USB_HWID]		= "USB",
	[CCXSEC_HWID]		= "CCXSEC",
	[XGMI_HWID]		= "XGMI",
	[XGBE_HWID]		= "XGBE",
	[MP0_HWID]		= "MP0",
};

static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP]	= GC_HWID,
	[HDP_HWIP]	= HDP_HWID,
	[SDMA0_HWIP]	= SDMA0_HWID,
	[SDMA1_HWIP]	= SDMA1_HWID,
	[SDMA2_HWIP]	= SDMA2_HWID,
	[SDMA3_HWIP]	= SDMA3_HWID,
	[MMHUB_HWIP]	= MMHUB_HWID,
	[ATHUB_HWIP]	= ATHUB_HWID,
	[NBIO_HWIP]	= NBIF_HWID,
	[MP0_HWIP]	= MP0_HWID,
	[MP1_HWIP]	= MP1_HWID,
	[UVD_HWIP]	= UVD_HWID,
	[VCE_HWIP]	= VCE_HWID,
	[DF_HWIP]	= DF_HWID,
	[DCE_HWIP]	= DMU_HWID,
	[OSSSYS_HWIP]	= OSSSYS_HWID,
	[SMUIO_HWIP]	= SMUIO_HWID,
	[PWR_HWIP]	= PWR_HWID,
	[NBIF_HWIP]	= NBIF_HWID,
	[THM_HWIP]	= THM_HWID,
	[CLK_HWIP]	= CLKA_HWID,
	[UMC_HWIP]	= UMC_HWID,
	[XGMI_HWIP]	= XGMI_HWID,
	[DCI_HWIP]	= DCI_HWID,
};

static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary)
{
	uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

	amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
				  adev->mman.discovery_tmr_size, false);
	return 0;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
{
	const struct firmware *fw;
	const char *fw_name;
	int r;

	switch (amdgpu_discovery) {
	case 2:
		fw_name = FIRMWARE_IP_DISCOVERY;
		break;
	default:
		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
		return -EINVAL;
	}

	r = request_firmware(&fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size);
	release_firmware(fw);

	return 0;
}
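/* The discovery binary protects its tables with a simple additive
 * checksum: every byte of the covered region is summed into a 16-bit
 * accumulator (overflow wraps) and the result is compared against the
 * expected value stored in the corresponding table header.
 */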
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;
	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table of VCN config.
	 */
	if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
	    (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}
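/* Read and validate the discovery binary: try the reserved TMR region
 * in VRAM first and fall back to the amdgpu/ip_discovery.bin firmware
 * file, then check the binary signature, the binary-wide checksum, and
 * the signature/checksum of each table that is present (IP discovery,
 * GC, harvest, VCN and MALL info).
 */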
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
	if (!adev->mman.discovery_bin)
		return -ENOMEM;

	r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin);
	if (r) {
		dev_err(adev->dev, "failed to read ip discovery binary from vram\n");
		r = -EINVAL;
		goto out;
	}

	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
		dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");
		/* retry read ip discovery binary from file */
		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
		if (r) {
			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
			r = -EINVAL;
			goto out;
		}
		/* check the ip discovery binary signature */
		if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
			dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n");
			r = -EINVAL;
			goto out;
		}
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      size, checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le16_to_cpu(ihdr->size), checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(ghdr->size), checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;

	return r;
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}

static int amdgpu_discovery_validate_ip(const struct ip *ip)
{
	if (ip->number_instance >= HWIP_MAX_INSTANCE) {
		DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
			  ip->number_instance);
		return -EINVAL;
	}
	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
			  le16_to_cpu(ip->hw_id));
		return -EINVAL;
	}

	return 0;
}
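/* Each struct ip entry in the discovery list is variable-length: it
 * ends with a flexible array of num_base_address register offsets, so
 * the walks below advance with a struct_size() stride rather than a
 * fixed sizeof().
 */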
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	int i, j;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			if (le16_to_cpu(ip->harvest) == 1) {
				switch (le16_to_cpu(ip->hw_id)) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (ip->number_instance == 0)
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
					else
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}
}

static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			if (harvest_info->list[i].number_instance == 0)
				adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
			else
				adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			(*umc_harvest_count)++;
			break;
		default:
			break;
		}
	}
}

/* ================================================== */
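/* The discovery table is exported to userspace as a kobject tree:
 *
 *   ip_discovery/die/#die/#hw_id/#instance/<attrs...>
 *
 * ip_discovery_top owns the "die" kset, each ip_die_entry groups the
 * HW IDs found on one die, and each ip_hw_instance exposes the version,
 * harvest state and base addresses of one IP instance as read-only
 * attributes.
 */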
struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8 num_instance;
	u8 major, minor, revision;
	u8 harvest;

	int num_base_addresses;
	u32 base_addr[];
};

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that, at + size <= PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	adev->ip_top = NULL;
	kfree(ip_top);
}
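/* Build the #hw_id/#instance subtree for one die: for every possible
 * HW ID, scan the die's IP list, register a kset the first time that
 * ID is seen, and add each matching instance as a child kobject of it.
 */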
static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips)
{
	int ii, jj, kk, res;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
			if (amdgpu_discovery_validate_ip(ip) ||
			    le16_to_cpu(ip->hw_id) != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->number_instance;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest = ip->harvest;
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++)
				ip_hw_instance->base_addr[kk] = ip->base_address[kk];

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &adev->ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips);
	}

	return 0;
}

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	struct kset *die_kset;
	int res, ii;

	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;

	adev->ip_top->adev = adev;

	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &adev->ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &adev->ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&adev->ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&adev->ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)
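/* Teardown walks each kset list in reverse.  kobject_put() may sleep
 * while tearing down sysfs entries, so it cannot be called under the
 * kset spinlock; each element is unlinked under the lock, then released
 * with the lock dropped.
 */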
static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &adev->ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&adev->ip_top->die_kset.kobj);
	kobject_put(&adev->ip_top->kobj);
}

/* ================================================== */
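/* Main parse pass over the discovery table: validate every die and IP
 * entry, count SDMA/VCN/UMC instances, convert base addresses to CPU
 * endianness in place, and record per-instance register offsets and IP
 * versions in adev->reg_offset[] and adev->ip_versions[].
 */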
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	uint8_t num_base_address;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_discovery_init failed\n");
		return r;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
				  le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
			  le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->number_instance,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
					ip->revision & 0xc0;
				ip->revision &= ~0xc0;
				adev->vcn.num_vcn_inst++;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID)
				adev->sdma.num_instances++;

			if (le16_to_cpu(ip->hw_id) == UMC_HWID)
				adev->gmc.num_umc++;

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
					DRM_DEBUG("set register base offset for %s\n",
						  hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->number_instance] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example.  Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
					 * example.  On most chips there are multiple instances
					 * with the same HWID.
					 */
					adev->ip_versions[hw_ip][ip->number_instance] =
						IP_VERSION(ip->major, ip->minor, ip->revision);
				}
			}

next_ip:
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	amdgpu_discovery_sysfs_init(adev);

	return 0;
}
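/* Look up one IP instance directly in the discovery table.  Returns 0
 * and fills in major/minor/revision when the (hw_id, number_instance)
 * pair is found, -EINVAL otherwise.
 */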
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
				    int *major, int *minor, int *revision)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	int i, j;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if ((le16_to_cpu(ip->hw_id) == hw_id) &&
			    (ip->number_instance == number_instance)) {
				if (major)
					*major = ip->major;
				if (minor)
					*minor = ip->minor;
				if (revision)
					*revision = ip->revision;
				return 0;
			}
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return -EINVAL;
}

static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;

	/*
	 * Harvest table does not fit Navi1x and legacy GPUs,
	 * so read harvest bit per IP data structure to set
	 * harvest configuration.
	 */
	if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) {
		if ((adev->pdev->device == 0x731E &&
		     (adev->pdev->revision == 0xC6 ||
		      adev->pdev->revision == 0xC7)) ||
		    (adev->pdev->device == 0x7340 &&
		     adev->pdev->revision == 0xC9) ||
		    (adev->pdev->device == 0x7360 &&
		     adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
								 &vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc) {
		adev->gmc.num_umc -= umc_harvest_count;
	}
}
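/* The GC, MALL and VCN info tables are versioned.  Each parser below
 * reads the common header first and then interprets the payload through
 * a union of the known layouts, keyed on header.version_major/minor;
 * for GC v1, the v1_1/v1_2 layouts extend what v1_0 already provides.
 */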
union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v2_0 v2;
};

static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (gc_info->v1.header.version_minor >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (gc_info->v1.header.version_minor >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}
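/* MALL size is accumulated per UMC: mall_size_per_m counts once per
 * UMC, twice when the m_s_present bit for that UMC is set, and half
 * when the m_half_use bit is set.  With illustrative numbers: 4 UMCs,
 * mall_size_per_m = 16 MiB, m_s_present = 0b0001, m_half_use = 0b0010
 * gives 32 + 8 + 16 + 16 = 72 MiB.
 */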
union mall_info {
	struct mall_info_v1_0 v1;
};

static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union vcn_info {
	struct vcn_info_v1_0 v1;
};

static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union vcn_info *vcn_info;
	u16 offset;
	int v;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
		dev_err(adev->dev, "invalid vcn instances\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);

	if (!offset)
		return 0;

	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
	case 1:
		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
			adev->vcn.vcn_codec_disable_mask[v] =
				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled VCN info table %d.%d\n",
			le16_to_cpu(vcn_info->v1.header.version_major),
			le16_to_cpu(vcn_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}
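/* The amdgpu_discovery_set_*_ip_blocks() helpers below translate the
 * discovered (or hardcoded) IP versions into the software IP blocks
 * registered on the device; each switch maps a version range onto the
 * matching *_ip_block implementation.
 */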
static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
	/* what IP to use for this? */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add common ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
	/* use GC or MMHUB IP version */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gmc ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[OSSSYS_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 3, 0):
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		break;
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
			adev->ip_versions[OSSSYS_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP0_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			adev->ip_versions[MP0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}
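/* Note the asic_type check for MP1 v11.0.2 below: more than one ASIC
 * family reports this MP1 version (Vega20 on the legacy powerplay path,
 * Arcturus on swSMU), so the version alone does not pick the SMU block.
 */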
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			adev->ip_versions[MP1_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}
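/* Display is matched by DCE_HWIP (the DCN-based display managers) or,
 * on the older Vega parts, by DCI_HWIP (DCE 12.x); virtual-display and
 * SR-IOV configurations get the software-only VKMS block instead.
 */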
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (adev->ip_versions[DCE_HWIP][0]) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
	} else if (adev->ip_versions[DCI_HWIP][0]) {
		switch (adev->ip_versions[DCI_HWIP][0]) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				adev->ip_versions[DCI_HWIP][0]);
			return -EINVAL;
		}
	}
#endif
	return 0;
}

static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			adev->ip_versions[SDMA0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}
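/* Multimedia: a non-zero VCE_HWIP version identifies the older separate
 * UVD/VCE engines; otherwise the UVD_HWIP slot carries the unified VCN
 * version, with JPEG registered alongside it where applicable.
 */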
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->ip_versions[VCE_HWIP][0]) {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
		switch (adev->ip_versions[VCE_HWIP][0]) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				adev->ip_versions[VCE_HWIP][0]);
			return -EINVAL;
		}
	} else {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 192):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
	}
	return 0;
}
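
/*
 * MES (MicroEngine Scheduler): optional on gfx10, gated behind the
 * amdgpu_mes module parameter; enabled unconditionally on gfx11.
 * Unknown GC versions simply skip MES rather than failing.
 */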
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
		if (amdgpu_mes) {
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
			adev->enable_mes = true;
			if (amdgpu_mes_kiq)
				adev->enable_mes_kiq = true;
		}
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	default:
		break;
	}
	return 0;
}
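
/**
 * amdgpu_discovery_set_ip_blocks - register all IP blocks for an ASIC
 * @adev: amdgpu device structure
 *
 * ASICs that predate IP discovery (Vega, Raven, Arcturus, Aldebaran)
 * get their IP version tables hardcoded here; newer ASICs read them
 * from the discovery binary. The versions are then used to derive the
 * chip family, select helper callbacks and add the individual IP
 * blocks in initialization order.
 */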
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r)
			return -EINVAL;

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}
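
	/*
	 * Derive the chip family from the GC IP version; the follow-up
	 * switch additionally marks the APU variants.
	 */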
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	default:
		return -EINVAL;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
		adev->gmc.xgmi.supported = true;

	/* set NBIO version */
	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg_ald;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg_sc;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	default:
		break;
	}
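
	/*
	 * HDP, DF and SMUIO only need their helper callback tables set
	 * here; they are not registered as full IP blocks.
	 */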
	switch (adev->ip_versions[HDP_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[DF_HWIP][0]) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[SMUIO_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	default:
		break;
	}

	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}
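
	/*
	 * With PSP-backed firmware loading, SMU has to be added right
	 * after PSP; direct and RLC-backdoor loading add it further
	 * down, after SDMA.
	 */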
	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	    !amdgpu_sriov_vf(adev)) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	return 0;
}