xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c (revision 80a0e828293389358f7db56adcdcb22b28df5e11)
/*
 * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
#include "amdgpu_ras.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_3.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "df_v4_6_2.h"
#include "df_v4_15.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "nbio_v7_9.h"
#include "nbio_v7_11.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gmc_v12_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "nbif_v6_3_1.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "hdp_v7_0.h"
#include "nv.h"
#include "soc21.h"
#include "soc24.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
#include "ih_v7_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "gfx_v12_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "sdma_v7_0.h"
#include "lsdma_v6_0.h"
#include "lsdma_v7_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "vcn_v4_0_3.h"
#include "jpeg_v4_0_3.h"
#include "vcn_v4_0_5.h"
#include "jpeg_v4_0_5.h"
#include "amdgpu_vkms.h"
#include "mes_v11_0.h"
#include "mes_v12_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
#include "smuio_v14_0_2.h"
#include "vcn_v5_0_0.h"
#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
#include "jpeg_v5_0_1.h"

#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif

MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");

#define mmIP_DISCOVERY_VERSION  0x16A00
#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMP0_SMN_C2PMSG_33	0x16061
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

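/* Human-readable names for the hardware IDs found in the discovery
 * table; used for debug output and for the ip_discovery sysfs
 * hierarchy built below.
 */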
static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID]		= "MP1",
	[MP2_HWID]		= "MP2",
	[THM_HWID]		= "THM",
	[SMUIO_HWID]		= "SMUIO",
	[FUSE_HWID]		= "FUSE",
	[CLKA_HWID]		= "CLKA",
	[PWR_HWID]		= "PWR",
	[GC_HWID]		= "GC",
	[UVD_HWID]		= "UVD",
	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
	[ACP_HWID]		= "ACP",
	[DCI_HWID]		= "DCI",
	[DMU_HWID]		= "DMU",
	[DCO_HWID]		= "DCO",
	[DIO_HWID]		= "DIO",
	[XDMA_HWID]		= "XDMA",
	[DCEAZ_HWID]		= "DCEAZ",
	[DAZ_HWID]		= "DAZ",
	[SDPMUX_HWID]		= "SDPMUX",
	[NTB_HWID]		= "NTB",
	[IOHC_HWID]		= "IOHC",
	[L2IMU_HWID]		= "L2IMU",
	[VCE_HWID]		= "VCE",
	[MMHUB_HWID]		= "MMHUB",
	[ATHUB_HWID]		= "ATHUB",
	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
	[DFX_HWID]		= "DFX",
	[DBGU0_HWID]		= "DBGU0",
	[DBGU1_HWID]		= "DBGU1",
	[OSSSYS_HWID]		= "OSSSYS",
	[HDP_HWID]		= "HDP",
	[SDMA0_HWID]		= "SDMA0",
	[SDMA1_HWID]		= "SDMA1",
	[SDMA2_HWID]		= "SDMA2",
	[SDMA3_HWID]		= "SDMA3",
	[LSDMA_HWID]		= "LSDMA",
	[ISP_HWID]		= "ISP",
	[DBGU_IO_HWID]		= "DBGU_IO",
	[DF_HWID]		= "DF",
	[CLKB_HWID]		= "CLKB",
	[FCH_HWID]		= "FCH",
	[DFX_DAP_HWID]		= "DFX_DAP",
	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
	[L1IMU3_HWID]		= "L1IMU3",
	[L1IMU4_HWID]		= "L1IMU4",
	[L1IMU5_HWID]		= "L1IMU5",
	[L1IMU6_HWID]		= "L1IMU6",
	[L1IMU7_HWID]		= "L1IMU7",
	[L1IMU8_HWID]		= "L1IMU8",
	[L1IMU9_HWID]		= "L1IMU9",
	[L1IMU10_HWID]		= "L1IMU10",
	[L1IMU11_HWID]		= "L1IMU11",
	[L1IMU12_HWID]		= "L1IMU12",
	[L1IMU13_HWID]		= "L1IMU13",
	[L1IMU14_HWID]		= "L1IMU14",
	[L1IMU15_HWID]		= "L1IMU15",
	[WAFLC_HWID]		= "WAFLC",
	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
	[PCIE_HWID]		= "PCIE",
	[PCS_HWID]		= "PCS",
	[DDCL_HWID]		= "DDCL",
	[SST_HWID]		= "SST",
	[IOAGR_HWID]		= "IOAGR",
	[NBIF_HWID]		= "NBIF",
	[IOAPIC_HWID]		= "IOAPIC",
	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
	[NTBCCP_HWID]		= "NTBCCP",
	[UMC_HWID]		= "UMC",
	[SATA_HWID]		= "SATA",
	[USB_HWID]		= "USB",
	[CCXSEC_HWID]		= "CCXSEC",
	[XGMI_HWID]		= "XGMI",
	[XGBE_HWID]		= "XGBE",
	[MP0_HWID]		= "MP0",
	[VPE_HWID]		= "VPE",
};

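/* Map the driver's logical HWIP blocks to the hardware IDs used by the
 * discovery table; HWIP entries left at zero have no discovery
 * counterpart and are skipped when register bases are assigned.
 */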
static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP]	= GC_HWID,
	[HDP_HWIP]	= HDP_HWID,
	[SDMA0_HWIP]	= SDMA0_HWID,
	[SDMA1_HWIP]	= SDMA1_HWID,
	[SDMA2_HWIP]    = SDMA2_HWID,
	[SDMA3_HWIP]    = SDMA3_HWID,
	[LSDMA_HWIP]    = LSDMA_HWID,
	[MMHUB_HWIP]	= MMHUB_HWID,
	[ATHUB_HWIP]	= ATHUB_HWID,
	[NBIO_HWIP]	= NBIF_HWID,
	[MP0_HWIP]	= MP0_HWID,
	[MP1_HWIP]	= MP1_HWID,
	[UVD_HWIP]	= UVD_HWID,
	[VCE_HWIP]	= VCE_HWID,
	[DF_HWIP]	= DF_HWID,
	[DCE_HWIP]	= DMU_HWID,
	[OSSSYS_HWIP]	= OSSSYS_HWID,
	[SMUIO_HWIP]	= SMUIO_HWID,
	[PWR_HWIP]	= PWR_HWID,
	[NBIF_HWIP]	= NBIF_HWID,
	[THM_HWIP]	= THM_HWID,
	[CLK_HWIP]	= CLKA_HWID,
	[UMC_HWIP]	= UMC_HWID,
	[XGMI_HWIP]	= XGMI_HWID,
	[DCI_HWIP]	= DCI_HWID,
	[PCIE_HWIP]	= PCIE_HWID,
	[VPE_HWIP]	= VPE_HWID,
	[ISP_HWIP]	= ISP_HWID,
};

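/* Fallback path: fetch the discovery binary out of the ACPI-reported
 * TMR (trusted memory region) when it cannot be read from dedicated
 * VRAM.
 */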
static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
{
	u64 tmr_offset, tmr_size, pos;
	void *discv_regn;
	int ret;

	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
	if (ret)
		return ret;

	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;

	/* This region is read-only and reserved from system use */
	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
	if (discv_regn) {
		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
		memunmap(discv_regn);
		return 0;
	}

	return -ENOENT;
}

#define IP_DISCOVERY_V2		2
#define IP_DISCOVERY_V4		4

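/* Read the discovery binary from the end of VRAM (DISCOVERY_TMR_OFFSET
 * back from the top). On bare metal, first wait for the IFWI init
 * completion bit in C2PMSG_33; if no VRAM is reported, fall back to
 * reading it from system memory.
 */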
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
{
	uint64_t vram_size;
	u32 msg;
	int i, ret = 0;

	if (!amdgpu_sriov_vf(adev)) {
		/* It can take up to a second for IFWI init to complete on some dGPUs,
		 * but generally it should be in the 60-100ms range.  Normally this starts
		 * as soon as the device gets power so by the time the OS loads this has long
		 * completed.  However, when a card is hotplugged via e.g., USB4, we need to
		 * wait for this to complete.  Once the C2PMSG is updated, we can
		 * continue.
		 */

		for (i = 0; i < 1000; i++) {
			msg = RREG32(mmMP0_SMN_C2PMSG_33);
			if (msg & 0x80000000)
				break;
			msleep(1);
		}
	}

	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;

	if (vram_size) {
		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
					  adev->mman.discovery_tmr_size, false);
	} else {
		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
	}

	return ret;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
							uint8_t *binary,
							const char *fw_name)
{
	const struct firmware *fw;
	int r;

	r = request_firmware(&fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
	release_firmware(fw);

	return 0;
}

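/* Discovery tables are protected by a simple 16-bit byte-sum checksum. */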
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;
	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table of VCN config.
	 */
	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
					   struct binary_header *bhdr)
{
	struct table_info *info;
	uint16_t checksum;
	uint16_t offset;

	info = &bhdr->table_list[NPS_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	struct nps_info_header *nhdr =
		(struct nps_info_header *)(adev->mman.discovery_bin + offset);

	if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
		dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
		return -EINVAL;
	}

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      le32_to_cpu(nhdr->size_bytes),
					      checksum)) {
		dev_dbg(adev->dev, "invalid nps info data table checksum\n");
		return -EINVAL;
	}

	return 0;
}

static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
{
	if (amdgpu_discovery == 2)
		return "amdgpu/ip_discovery.bin";

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		return "amdgpu/vega10_ip_discovery.bin";
	case CHIP_VEGA12:
		return "amdgpu/vega12_ip_discovery.bin";
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			return "amdgpu/raven2_ip_discovery.bin";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			return "amdgpu/picasso_ip_discovery.bin";
		else
			return "amdgpu/raven_ip_discovery.bin";
	case CHIP_VEGA20:
		return "amdgpu/vega20_ip_discovery.bin";
	case CHIP_ARCTURUS:
		return "amdgpu/arcturus_ip_discovery.bin";
	case CHIP_ALDEBARAN:
		return "amdgpu/aldebaran_ip_discovery.bin";
	default:
		return NULL;
	}
}

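/* Load the discovery binary (from a file or from the device) and verify
 * its signature plus the per-table checksums (IP discovery, GC, harvest,
 * VCN and MALL info) before anything else consumes it.
 */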
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	const char *fw_name;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
	if (!adev->mman.discovery_bin)
		return -ENOMEM;

	/* Read from file if it is the preferred option */
	fw_name = amdgpu_discovery_get_fw_name(adev);
	if (fw_name != NULL) {
		dev_info(adev->dev, "use ip discovery information from file\n");
		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);

		if (r) {
			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
			r = -EINVAL;
			goto out;
		}

	} else {
		r = amdgpu_discovery_read_binary_from_mem(
			adev, adev->mman.discovery_bin);
		if (r)
			goto out;
	}

	/* check the ip discovery binary signature */
	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
		dev_err(adev->dev,
			"get invalid ip discovery binary signature\n");
		r = -EINVAL;
		goto out;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      size, checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le16_to_cpu(ihdr->size), checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(ghdr->size), checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
	if ((amdgpu_discovery != 2) &&
	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
		amdgpu_ras_query_boot_status(adev, 4);
	return r;
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}

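/* Bounds-check an IP entry before its instance number and hw_id are
 * used as array indices.
 */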
static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
					uint8_t instance, uint16_t hw_id)
{
	if (instance >= HWIP_MAX_INSTANCE) {
		dev_err(adev->dev,
			"Unexpected instance_number (%d) from ip discovery blob\n",
			instance);
		return -EINVAL;
	}
	if (hw_id >= HW_ID_MAX) {
		dev_err(adev->dev,
			"Unexpected hw_id (%d) from ip discovery blob\n",
			hw_id);
		return -EINVAL;
	}

	return 0;
}

static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						uint32_t *vcn_harvest_count)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	uint16_t hw_id;
	uint8_t inst;
	int i, j;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin +
					   ip_offset);
			inst = ip->number_instance;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
				goto next_ip;

			if (ip->harvest == 1) {
				switch (hw_id) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (inst == 0) {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
					} else {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
					}
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			ip_offset += struct_size(ip, base_address,
						 ip->num_base_address);
		}
	}
}

static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;
	uint32_t umc_harvest_config = 0;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			adev->vcn.harvest_config |=
				(1 << harvest_info->list[i].number_instance);
			adev->jpeg.harvest_config |=
				(1 << harvest_info->list[i].number_instance);

			adev->vcn.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			adev->jpeg.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			umc_harvest_config |=
				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
			(*umc_harvest_count)++;
			break;
		case GC_HWID:
			adev->gfx.xcc_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case SDMA0_HWID:
			adev->sdma.sdma_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
#if defined(CONFIG_DRM_AMD_ISP)
		case ISP_HWID:
			adev->isp.harvest_config |=
				~(1U << harvest_info->list[i].number_instance);
			break;
#endif
		default:
			break;
		}
	}

	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
				~umc_harvest_config;
}

/* ================================================== */

struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8  num_instance;
	u8  major, minor, revision;
	u8  harvest;

	int num_base_addresses;
	u32 base_addr[] __counted_by(num_base_addresses);
};

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that at + size <= PAGE_SIZE. */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static const struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static const struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static const struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static const struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static const struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	adev->ip_top = NULL;
	kfree(ip_top);
}

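/* Reconstruct the per-instance harvest status for sysfs from the
 * instance masks that were populated while parsing the harvest info.
 */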
static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
{
	uint8_t harvest = 0;

	/* Until a uniform way is figured out, get the mask based on hw_id */
	switch (hw_id) {
	case VCN_HWID:
		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
		break;
	case DMU_HWID:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
			harvest = 0x1;
		break;
	case UMC_HWID:
		/* TODO: It needs another parsing; for now, ignore. */
		break;
	case GC_HWID:
		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
		break;
	case SDMA0_HWID:
		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
		break;
	default:
		break;
	}

	return harvest;
}

static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
{
	int ii, jj, kk, res;
	uint16_t hw_id;
	uint8_t inst;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip_v4 *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
			inst = ip->instance_number;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
			    hw_id != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->instance_number;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest =
				amdgpu_discovery_get_harvest_info(
					adev, ip_hw_instance->hw_id,
					ip_hw_instance->num_instance);
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
				if (reg_base_64)
					ip_hw_instance->base_addr[kk] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
				else
					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
			}

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			if (reg_base_64)
				ip_offset += struct_size(ip, base_address_64,
							 ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address,
							 ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &adev->ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
	}

	return 0;
}

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	struct kset *die_kset;
	int res, ii;

	if (!adev->mman.discovery_bin)
		return -EINVAL;

	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;

	adev->ip_top->adev = adev;

	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &adev->ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &adev->ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&adev->ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&adev->ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &adev->ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&adev->ip_top->die_kset.kobj);
	kobject_put(&adev->ip_top->kobj);
}

/* ================================================== */

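/* Walk all dies and IP instances in the discovery table: count the VCN,
 * SDMA, VPE and UMC instances, build the xcc/sdma/vcn/jpeg instance
 * masks, and record per-instance register base offsets and IP versions.
 */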
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	uint8_t num_base_address, subrev, variant;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip_v4 *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint32_t wafl_ver;
	uint16_t num_ips;
	uint16_t hw_id;
	uint8_t inst;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_discovery_init failed\n");
		return r;
	}

	wafl_ver = 0;
	adev->gfx.xcc_mask = 0;
	adev->sdma.sdma_mask = 0;
	adev->vcn.inst_mask = 0;
	adev->jpeg.inst_mask = 0;
	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
					le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
				le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);

			inst = ip->instance_number;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->instance_number,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				if (adev->vcn.num_vcn_inst <
				    AMDGPU_MAX_VCN_INSTANCES) {
					adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
						ip->revision & 0xc0;
					adev->vcn.num_vcn_inst++;
					adev->vcn.inst_mask |=
						(1U << ip->instance_number);
					adev->jpeg.inst_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
				}
				ip->revision &= ~0xc0;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances <
				    AMDGPU_MAX_SDMA_INSTANCES) {
					adev->sdma.num_instances++;
					adev->sdma.sdma_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
				}
			}

			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
					adev->vpe.num_instances++;
				else
					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
						adev->vpe.num_instances + 1,
						AMDGPU_MAX_VPE_INSTANCES);
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
				adev->gmc.num_umc++;
				adev->umc.node_inst_num++;
			}

			if (le16_to_cpu(ip->hw_id) == GC_HWID)
				adev->gfx.xcc_mask |=
					(1U << ip->instance_number);

			if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
				wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
							   ip->revision, 0, 0);

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				if (ihdr->base_addr_64_bit)
					/* Truncate the 64bit base address from ip discovery
					 * and only store lower 32bit ip base in reg_offset[].
					 * Bits > 32 follows ASIC specific format, thus just
					 * discard them and handle it within specific ASIC.
					 * By this way reg_offset[] and related helpers can
					 * stay unchanged.
					 * The base address is in dwords, thus clear the
					 * highest 2 bits to store.
					 */
					ip->base_address[k] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
				else
					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
				    hw_id_map[hw_ip] != 0) {
					DRM_DEBUG("set register base offset for %s\n",
							hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->instance_number] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example.  Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
					 * example.  On most chips there are multiple instances
					 * with the same HWID.
					 */

					if (ihdr->version < 3) {
						subrev = 0;
						variant = 0;
					} else {
						subrev = ip->sub_revision;
						variant = ip->variant;
					}

					adev->ip_versions[hw_ip]
							 [ip->instance_number] =
						IP_VERSION_FULL(ip->major,
								ip->minor,
								ip->revision,
								variant,
								subrev);
				}
			}

next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
		adev->ip_versions[XGMI_HWIP][0] = wafl_ver;

	return 0;
}

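/* Apply harvest information: older discovery binaries (Navi1x and
 * before) only carry a harvest bit in each IP instance, newer ones
 * provide a dedicated harvest table.
 */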
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	struct ip_discovery_header *ihdr;
	struct binary_header *bhdr;
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;
	uint16_t offset, ihdr_ver;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      offset);
	ihdr_ver = le16_to_cpu(ihdr->version);
	/*
	 * Harvest table does not fit Navi1x and legacy GPUs,
	 * so read harvest bit per IP data structure to set
	 * harvest configuration.
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
	    ihdr_ver <= 2) {
		if ((adev->pdev->device == 0x731E &&
			(adev->pdev->revision == 0xC6 ||
			 adev->pdev->revision == 0xC7)) ||
			(adev->pdev->device == 0x7340 &&
			 adev->pdev->revision == 0xC9) ||
			(adev->pdev->device == 0x7360 &&
			 adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
				&vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc) {
		adev->gmc.num_umc -= umc_harvest_count;
	}
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v1_3 v1_3;
	struct gc_info_v2_0 v2;
	struct gc_info_v2_1 v2_1;
};

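/* Fill adev->gfx.config and cu_info from the GC table. Note the v1
 * layout counts WGPs (two CUs each) while the v2 layout counts CUs
 * directly.
 */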
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
			adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
			adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
			adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
			adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union mall_info {
	struct mall_info_v1_0 v1;
	struct mall_info_v2_0 v2;
};

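/* Compute the total MALL (last level cache) size. The v1 layout derives
 * it from a per-unit size plus present/half-use masks, while v2 reports
 * the size per UMC instance directly.
 */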
1674 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1675 {
1676 	struct binary_header *bhdr;
1677 	union mall_info *mall_info;
1678 	u32 u, mall_size_per_umc, m_s_present, half_use;
1679 	u64 mall_size;
1680 	u16 offset;
1681 
1682 	if (!adev->mman.discovery_bin) {
1683 		dev_err(adev->dev, "ip discovery uninitialized\n");
1684 		return -EINVAL;
1685 	}
1686 
1687 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1688 	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1689 
1690 	if (!offset)
1691 		return 0;
1692 
1693 	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1694 
1695 	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1696 	case 1:
1697 		mall_size = 0;
1698 		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1699 		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1700 		half_use = le32_to_cpu(mall_info->v1.m_half_use);
1701 		for (u = 0; u < adev->gmc.num_umc; u++) {
1702 			if (m_s_present & (1 << u))
1703 				mall_size += mall_size_per_umc * 2;
1704 			else if (half_use & (1 << u))
1705 				mall_size += mall_size_per_umc / 2;
1706 			else
1707 				mall_size += mall_size_per_umc;
1708 		}
1709 		adev->gmc.mall_size = mall_size;
1710 		adev->gmc.m_half_use = half_use;
1711 		break;
1712 	case 2:
1713 		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1714 		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
1715 		break;
1716 	default:
1717 		dev_err(adev->dev,
1718 			"Unhandled MALL info table %d.%d\n",
1719 			le16_to_cpu(mall_info->v1.header.version_major),
1720 			le16_to_cpu(mall_info->v1.header.version_minor));
1721 		return -EINVAL;
1722 	}
1723 	return 0;
1724 }
1725 
1726 union vcn_info {
1727 	struct vcn_info_v1_0 v1;
1728 };
1729 
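/*
 * Parse the VCN info table: cache each instance's fuse-derived codec
 * disable mask so the VCN code can skip codecs fused off on this part.
 */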
1730 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1731 {
1732 	struct binary_header *bhdr;
1733 	union vcn_info *vcn_info;
1734 	u16 offset;
1735 	int v;
1736 
1737 	if (!adev->mman.discovery_bin) {
1738 		dev_err(adev->dev, "ip discovery uninitialized\n");
1739 		return -EINVAL;
1740 	}
1741 
1742 	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES,
1743 	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES, but that
1744 	 * may change in the future with new GPUs, so keep this check for
1745 	 * defensive purposes.
1746 	 */
1747 	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1748 		dev_err(adev->dev, "invalid vcn instances\n");
1749 		return -EINVAL;
1750 	}
1751 
1752 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1753 	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1754 
1755 	if (!offset)
1756 		return 0;
1757 
1758 	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1759 
1760 	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1761 	case 1:
1762 		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1763 		 * so this won't overflow.
1764 		 */
1765 		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1766 			adev->vcn.inst[v].vcn_codec_disable_mask =
1767 				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1768 		}
1769 		break;
1770 	default:
1771 		dev_err(adev->dev,
1772 			"Unhandled VCN info table %d.%d\n",
1773 			le16_to_cpu(vcn_info->v1.header.version_major),
1774 			le16_to_cpu(vcn_info->v1.header.version_minor));
1775 		return -EINVAL;
1776 	}
1777 	return 0;
1778 }
1779 
1780 union nps_info {
1781 	struct nps_info_v1_0 v1;
1782 };
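/*
 * Re-read the NPS info table straight from the discovery TMR in VRAM
 * rather than from the cached copy in adev->mman.discovery_bin, and
 * validate it against the checksum from the freshly read binary header.
 * Callers use this when the cached table may be stale, presumably after
 * a memory (NPS) partition mode change.
 */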
1783 
1784 static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
1785 					     union nps_info *nps_data)
1786 {
1787 	uint64_t vram_size, pos, offset;
1788 	struct nps_info_header *nhdr;
1789 	struct binary_header bhdr;
1790 	uint16_t checksum;
1791 
1792 	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
1793 	pos = vram_size - DISCOVERY_TMR_OFFSET;
1794 	amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);
1795 
1796 	offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
1797 	checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
1798 
1799 	amdgpu_device_vram_access(adev, (pos + offset), nps_data,
1800 				  sizeof(*nps_data), false);
1801 
1802 	nhdr = (struct nps_info_header *)(nps_data);
1803 	if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
1804 					      le32_to_cpu(nhdr->size_bytes),
1805 					      checksum)) {
1806 		dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
1807 		return -EINVAL;
1808 	}
1809 
1810 	return 0;
1811 }
1812 
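/*
 * Return the NPS type and the memory ranges described by the NPS info
 * table.  With @refresh set, the table is re-read from VRAM (see above);
 * otherwise the cached discovery binary is used.  On success, *@ranges
 * points to a kvcalloc()ed array of *@range_cnt entries which the caller
 * is expected to kvfree().
 */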
1813 int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
1814 				  uint32_t *nps_type,
1815 				  struct amdgpu_gmc_memrange **ranges,
1816 				  int *range_cnt, bool refresh)
1817 {
1818 	struct amdgpu_gmc_memrange *mem_ranges;
1819 	struct binary_header *bhdr;
1820 	union nps_info *nps_info;
1821 	union nps_info nps_data;
1822 	u16 offset;
1823 	int i, r;
1824 
1825 	if (!nps_type || !range_cnt || !ranges)
1826 		return -EINVAL;
1827 
1828 	if (refresh) {
1829 		r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
1830 		if (r)
1831 			return r;
1832 		nps_info = &nps_data;
1833 	} else {
1834 		if (!adev->mman.discovery_bin) {
1835 			dev_err(adev->dev,
1836 				"fetch mem range failed, ip discovery uninitialized\n");
1837 			return -EINVAL;
1838 		}
1839 
1840 		bhdr = (struct binary_header *)adev->mman.discovery_bin;
1841 		offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
1842 
1843 		if (!offset)
1844 			return -ENOENT;
1845 
1846 		/* If verification fails, return as if NPS table doesn't exist */
1847 		if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
1848 			return -ENOENT;
1849 
1850 		nps_info =
1851 			(union nps_info *)(adev->mman.discovery_bin + offset);
1852 	}
1853 
1854 	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
1855 	case 1:
1856 		mem_ranges = kvcalloc(nps_info->v1.count,
1857 				      sizeof(*mem_ranges),
1858 				      GFP_KERNEL);
1859 		if (!mem_ranges)
1860 			return -ENOMEM;
1861 		*nps_type = nps_info->v1.nps_type;
1862 		*range_cnt = nps_info->v1.count;
1863 		for (i = 0; i < *range_cnt; i++) {
1864 			mem_ranges[i].base_address =
1865 				nps_info->v1.instance_info[i].base_address;
1866 			mem_ranges[i].limit_address =
1867 				nps_info->v1.instance_info[i].limit_address;
1868 			mem_ranges[i].nid_mask = -1;
1869 			mem_ranges[i].flags = 0;
1870 		}
1871 		*ranges = mem_ranges;
1872 		break;
1873 	default:
1874 		dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
1875 			le16_to_cpu(nps_info->v1.header.version_major),
1876 			le16_to_cpu(nps_info->v1.header.version_minor));
1877 		return -EINVAL;
1878 	}
1879 
1880 	return 0;
1881 }
1882 
1883 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1884 {
1885 	/* no dedicated discovery entry for this; key off the GC IP version */
1886 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1887 	case IP_VERSION(9, 0, 1):
1888 	case IP_VERSION(9, 1, 0):
1889 	case IP_VERSION(9, 2, 1):
1890 	case IP_VERSION(9, 2, 2):
1891 	case IP_VERSION(9, 3, 0):
1892 	case IP_VERSION(9, 4, 0):
1893 	case IP_VERSION(9, 4, 1):
1894 	case IP_VERSION(9, 4, 2):
1895 	case IP_VERSION(9, 4, 3):
1896 	case IP_VERSION(9, 4, 4):
1897 	case IP_VERSION(9, 5, 0):
1898 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1899 		break;
1900 	case IP_VERSION(10, 1, 10):
1901 	case IP_VERSION(10, 1, 1):
1902 	case IP_VERSION(10, 1, 2):
1903 	case IP_VERSION(10, 1, 3):
1904 	case IP_VERSION(10, 1, 4):
1905 	case IP_VERSION(10, 3, 0):
1906 	case IP_VERSION(10, 3, 1):
1907 	case IP_VERSION(10, 3, 2):
1908 	case IP_VERSION(10, 3, 3):
1909 	case IP_VERSION(10, 3, 4):
1910 	case IP_VERSION(10, 3, 5):
1911 	case IP_VERSION(10, 3, 6):
1912 	case IP_VERSION(10, 3, 7):
1913 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1914 		break;
1915 	case IP_VERSION(11, 0, 0):
1916 	case IP_VERSION(11, 0, 1):
1917 	case IP_VERSION(11, 0, 2):
1918 	case IP_VERSION(11, 0, 3):
1919 	case IP_VERSION(11, 0, 4):
1920 	case IP_VERSION(11, 5, 0):
1921 	case IP_VERSION(11, 5, 1):
1922 	case IP_VERSION(11, 5, 2):
1923 	case IP_VERSION(11, 5, 3):
1924 		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1925 		break;
1926 	case IP_VERSION(12, 0, 0):
1927 	case IP_VERSION(12, 0, 1):
1928 		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
1929 		break;
1930 	default:
1931 		dev_err(adev->dev,
1932 			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1933 			amdgpu_ip_version(adev, GC_HWIP, 0));
1934 		return -EINVAL;
1935 	}
1936 	return 0;
1937 }
1938 
1939 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1940 {
1941 	/* use GC or MMHUB IP version */
1942 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1943 	case IP_VERSION(9, 0, 1):
1944 	case IP_VERSION(9, 1, 0):
1945 	case IP_VERSION(9, 2, 1):
1946 	case IP_VERSION(9, 2, 2):
1947 	case IP_VERSION(9, 3, 0):
1948 	case IP_VERSION(9, 4, 0):
1949 	case IP_VERSION(9, 4, 1):
1950 	case IP_VERSION(9, 4, 2):
1951 	case IP_VERSION(9, 4, 3):
1952 	case IP_VERSION(9, 4, 4):
1953 	case IP_VERSION(9, 5, 0):
1954 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1955 		break;
1956 	case IP_VERSION(10, 1, 10):
1957 	case IP_VERSION(10, 1, 1):
1958 	case IP_VERSION(10, 1, 2):
1959 	case IP_VERSION(10, 1, 3):
1960 	case IP_VERSION(10, 1, 4):
1961 	case IP_VERSION(10, 3, 0):
1962 	case IP_VERSION(10, 3, 1):
1963 	case IP_VERSION(10, 3, 2):
1964 	case IP_VERSION(10, 3, 3):
1965 	case IP_VERSION(10, 3, 4):
1966 	case IP_VERSION(10, 3, 5):
1967 	case IP_VERSION(10, 3, 6):
1968 	case IP_VERSION(10, 3, 7):
1969 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1970 		break;
1971 	case IP_VERSION(11, 0, 0):
1972 	case IP_VERSION(11, 0, 1):
1973 	case IP_VERSION(11, 0, 2):
1974 	case IP_VERSION(11, 0, 3):
1975 	case IP_VERSION(11, 0, 4):
1976 	case IP_VERSION(11, 5, 0):
1977 	case IP_VERSION(11, 5, 1):
1978 	case IP_VERSION(11, 5, 2):
1979 	case IP_VERSION(11, 5, 3):
1980 		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1981 		break;
1982 	case IP_VERSION(12, 0, 0):
1983 	case IP_VERSION(12, 0, 1):
1984 		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
1985 		break;
1986 	default:
1987 		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1988 			amdgpu_ip_version(adev, GC_HWIP, 0));
1989 		return -EINVAL;
1990 	}
1991 	return 0;
1992 }
1993 
1994 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1995 {
1996 	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
1997 	case IP_VERSION(4, 0, 0):
1998 	case IP_VERSION(4, 0, 1):
1999 	case IP_VERSION(4, 1, 0):
2000 	case IP_VERSION(4, 1, 1):
2001 	case IP_VERSION(4, 3, 0):
2002 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
2003 		break;
2004 	case IP_VERSION(4, 2, 0):
2005 	case IP_VERSION(4, 2, 1):
2006 	case IP_VERSION(4, 4, 0):
2007 	case IP_VERSION(4, 4, 2):
2008 	case IP_VERSION(4, 4, 5):
2009 		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
2010 		break;
2011 	case IP_VERSION(5, 0, 0):
2012 	case IP_VERSION(5, 0, 1):
2013 	case IP_VERSION(5, 0, 2):
2014 	case IP_VERSION(5, 0, 3):
2015 	case IP_VERSION(5, 2, 0):
2016 	case IP_VERSION(5, 2, 1):
2017 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
2018 		break;
2019 	case IP_VERSION(6, 0, 0):
2020 	case IP_VERSION(6, 0, 1):
2021 	case IP_VERSION(6, 0, 2):
2022 		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
2023 		break;
2024 	case IP_VERSION(6, 1, 0):
2025 		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
2026 		break;
2027 	case IP_VERSION(7, 0, 0):
2028 		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
2029 		break;
2030 	default:
2031 		dev_err(adev->dev,
2032 			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
2033 			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
2034 		return -EINVAL;
2035 	}
2036 	return 0;
2037 }
2038 
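/* MP0 hosts the PSP, so the PSP block is keyed off the MP0 IP version */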
2039 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
2040 {
2041 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2042 	case IP_VERSION(9, 0, 0):
2043 		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
2044 		break;
2045 	case IP_VERSION(10, 0, 0):
2046 	case IP_VERSION(10, 0, 1):
2047 		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
2048 		break;
2049 	case IP_VERSION(11, 0, 0):
2050 	case IP_VERSION(11, 0, 2):
2051 	case IP_VERSION(11, 0, 4):
2052 	case IP_VERSION(11, 0, 5):
2053 	case IP_VERSION(11, 0, 9):
2054 	case IP_VERSION(11, 0, 7):
2055 	case IP_VERSION(11, 0, 11):
2056 	case IP_VERSION(11, 0, 12):
2057 	case IP_VERSION(11, 0, 13):
2058 	case IP_VERSION(11, 5, 0):
2059 	case IP_VERSION(11, 5, 2):
2060 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
2061 		break;
2062 	case IP_VERSION(11, 0, 8):
2063 		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
2064 		break;
2065 	case IP_VERSION(11, 0, 3):
2066 	case IP_VERSION(12, 0, 1):
2067 		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
2068 		break;
2069 	case IP_VERSION(13, 0, 0):
2070 	case IP_VERSION(13, 0, 1):
2071 	case IP_VERSION(13, 0, 2):
2072 	case IP_VERSION(13, 0, 3):
2073 	case IP_VERSION(13, 0, 5):
2074 	case IP_VERSION(13, 0, 6):
2075 	case IP_VERSION(13, 0, 7):
2076 	case IP_VERSION(13, 0, 8):
2077 	case IP_VERSION(13, 0, 10):
2078 	case IP_VERSION(13, 0, 11):
2079 	case IP_VERSION(13, 0, 12):
2080 	case IP_VERSION(13, 0, 14):
2081 	case IP_VERSION(14, 0, 0):
2082 	case IP_VERSION(14, 0, 1):
2083 	case IP_VERSION(14, 0, 4):
2084 		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
2085 		break;
2086 	case IP_VERSION(13, 0, 4):
2087 		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
2088 		break;
2089 	case IP_VERSION(14, 0, 2):
2090 	case IP_VERSION(14, 0, 3):
2091 	case IP_VERSION(14, 0, 5):
2092 		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
2093 		break;
2094 	default:
2095 		dev_err(adev->dev,
2096 			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
2097 			amdgpu_ip_version(adev, MP0_HWIP, 0));
2098 		return -EINVAL;
2099 	}
2100 	return 0;
2101 }
2102 
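/*
 * MP1 hosts the SMU.  Note the MP1 11.0.2 special case below: Arcturus
 * takes the SMU v11 block while the other 11.0.2 part (Vega20) still
 * goes through the legacy powerplay block.
 */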
2103 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
2104 {
2105 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2106 	case IP_VERSION(9, 0, 0):
2107 	case IP_VERSION(10, 0, 0):
2108 	case IP_VERSION(10, 0, 1):
2109 	case IP_VERSION(11, 0, 2):
2110 		if (adev->asic_type == CHIP_ARCTURUS)
2111 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2112 		else
2113 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2114 		break;
2115 	case IP_VERSION(11, 0, 0):
2116 	case IP_VERSION(11, 0, 5):
2117 	case IP_VERSION(11, 0, 9):
2118 	case IP_VERSION(11, 0, 7):
2119 	case IP_VERSION(11, 0, 8):
2120 	case IP_VERSION(11, 0, 11):
2121 	case IP_VERSION(11, 0, 12):
2122 	case IP_VERSION(11, 0, 13):
2123 	case IP_VERSION(11, 5, 0):
2124 	case IP_VERSION(11, 5, 2):
2125 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2126 		break;
2127 	case IP_VERSION(12, 0, 0):
2128 	case IP_VERSION(12, 0, 1):
2129 		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
2130 		break;
2131 	case IP_VERSION(13, 0, 0):
2132 	case IP_VERSION(13, 0, 1):
2133 	case IP_VERSION(13, 0, 2):
2134 	case IP_VERSION(13, 0, 3):
2135 	case IP_VERSION(13, 0, 4):
2136 	case IP_VERSION(13, 0, 5):
2137 	case IP_VERSION(13, 0, 6):
2138 	case IP_VERSION(13, 0, 7):
2139 	case IP_VERSION(13, 0, 8):
2140 	case IP_VERSION(13, 0, 10):
2141 	case IP_VERSION(13, 0, 11):
2142 	case IP_VERSION(13, 0, 14):
2143 	case IP_VERSION(13, 0, 12):
2144 		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
2145 		break;
2146 	case IP_VERSION(14, 0, 0):
2147 	case IP_VERSION(14, 0, 1):
2148 	case IP_VERSION(14, 0, 2):
2149 	case IP_VERSION(14, 0, 3):
2150 	case IP_VERSION(14, 0, 4):
2151 	case IP_VERSION(14, 0, 5):
2152 		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
2153 		break;
2154 	default:
2155 		dev_err(adev->dev,
2156 			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
2157 			amdgpu_ip_version(adev, MP1_HWIP, 0));
2158 		return -EINVAL;
2159 	}
2160 	return 0;
2161 }
2162 
2163 #if defined(CONFIG_DRM_AMD_DC)
2164 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
2165 {
2166 	amdgpu_device_set_sriov_virtual_display(adev);
2167 	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2168 }
2169 #endif
2170 
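/*
 * Display: virtual display takes priority, then DC.  DCN IPs are
 * enumerated under DCE_HWIP, the older DCE 12.x blocks under DCI_HWIP.
 */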
2171 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
2172 {
2173 	if (adev->enable_virtual_display) {
2174 		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2175 		return 0;
2176 	}
2177 
2178 	if (!amdgpu_device_has_dc_support(adev))
2179 		return 0;
2180 
2181 #if defined(CONFIG_DRM_AMD_DC)
2182 	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2183 		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2184 		case IP_VERSION(1, 0, 0):
2185 		case IP_VERSION(1, 0, 1):
2186 		case IP_VERSION(2, 0, 2):
2187 		case IP_VERSION(2, 0, 0):
2188 		case IP_VERSION(2, 0, 3):
2189 		case IP_VERSION(2, 1, 0):
2190 		case IP_VERSION(3, 0, 0):
2191 		case IP_VERSION(3, 0, 2):
2192 		case IP_VERSION(3, 0, 3):
2193 		case IP_VERSION(3, 0, 1):
2194 		case IP_VERSION(3, 1, 2):
2195 		case IP_VERSION(3, 1, 3):
2196 		case IP_VERSION(3, 1, 4):
2197 		case IP_VERSION(3, 1, 5):
2198 		case IP_VERSION(3, 1, 6):
2199 		case IP_VERSION(3, 2, 0):
2200 		case IP_VERSION(3, 2, 1):
2201 		case IP_VERSION(3, 5, 0):
2202 		case IP_VERSION(3, 5, 1):
2203 		case IP_VERSION(3, 6, 0):
2204 		case IP_VERSION(4, 1, 0):
2205 			/* TODO: Fix IP version. DC code expects version 4.0.1 */
2206 			if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
2207 				adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);
2208 
2209 			if (amdgpu_sriov_vf(adev))
2210 				amdgpu_discovery_set_sriov_display(adev);
2211 			else
2212 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2213 			break;
2214 		default:
2215 			dev_err(adev->dev,
2216 				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
2217 				amdgpu_ip_version(adev, DCE_HWIP, 0));
2218 			return -EINVAL;
2219 		}
2220 	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2221 		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2222 		case IP_VERSION(12, 0, 0):
2223 		case IP_VERSION(12, 0, 1):
2224 		case IP_VERSION(12, 1, 0):
2225 			if (amdgpu_sriov_vf(adev))
2226 				amdgpu_discovery_set_sriov_display(adev);
2227 			else
2228 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2229 			break;
2230 		default:
2231 			dev_err(adev->dev,
2232 				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
2233 				amdgpu_ip_version(adev, DCI_HWIP, 0));
2234 			return -EINVAL;
2235 		}
2236 	}
2237 #endif
2238 	return 0;
2239 }
2240 
2241 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
2242 {
2243 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2244 	case IP_VERSION(9, 0, 1):
2245 	case IP_VERSION(9, 1, 0):
2246 	case IP_VERSION(9, 2, 1):
2247 	case IP_VERSION(9, 2, 2):
2248 	case IP_VERSION(9, 3, 0):
2249 	case IP_VERSION(9, 4, 0):
2250 	case IP_VERSION(9, 4, 1):
2251 	case IP_VERSION(9, 4, 2):
2252 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
2253 		break;
2254 	case IP_VERSION(9, 4, 3):
2255 	case IP_VERSION(9, 4, 4):
2256 	case IP_VERSION(9, 5, 0):
2257 		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
2258 		break;
2259 	case IP_VERSION(10, 1, 10):
2260 	case IP_VERSION(10, 1, 2):
2261 	case IP_VERSION(10, 1, 1):
2262 	case IP_VERSION(10, 1, 3):
2263 	case IP_VERSION(10, 1, 4):
2264 	case IP_VERSION(10, 3, 0):
2265 	case IP_VERSION(10, 3, 2):
2266 	case IP_VERSION(10, 3, 1):
2267 	case IP_VERSION(10, 3, 4):
2268 	case IP_VERSION(10, 3, 5):
2269 	case IP_VERSION(10, 3, 6):
2270 	case IP_VERSION(10, 3, 3):
2271 	case IP_VERSION(10, 3, 7):
2272 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
2273 		break;
2274 	case IP_VERSION(11, 0, 0):
2275 	case IP_VERSION(11, 0, 1):
2276 	case IP_VERSION(11, 0, 2):
2277 	case IP_VERSION(11, 0, 3):
2278 	case IP_VERSION(11, 0, 4):
2279 	case IP_VERSION(11, 5, 0):
2280 	case IP_VERSION(11, 5, 1):
2281 	case IP_VERSION(11, 5, 2):
2282 	case IP_VERSION(11, 5, 3):
2283 		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
2284 		break;
2285 	case IP_VERSION(12, 0, 0):
2286 	case IP_VERSION(12, 0, 1):
2287 		amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
2288 		break;
2289 	default:
2290 		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
2291 			amdgpu_ip_version(adev, GC_HWIP, 0));
2292 		return -EINVAL;
2293 	}
2294 	return 0;
2295 }
2296 
2297 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
2298 {
2299 	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
2300 	case IP_VERSION(4, 0, 0):
2301 	case IP_VERSION(4, 0, 1):
2302 	case IP_VERSION(4, 1, 0):
2303 	case IP_VERSION(4, 1, 1):
2304 	case IP_VERSION(4, 1, 2):
2305 	case IP_VERSION(4, 2, 0):
2306 	case IP_VERSION(4, 2, 2):
2307 	case IP_VERSION(4, 4, 0):
2308 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
2309 		break;
2310 	case IP_VERSION(4, 4, 2):
2311 	case IP_VERSION(4, 4, 5):
2312 	case IP_VERSION(4, 4, 4):
2313 		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
2314 		break;
2315 	case IP_VERSION(5, 0, 0):
2316 	case IP_VERSION(5, 0, 1):
2317 	case IP_VERSION(5, 0, 2):
2318 	case IP_VERSION(5, 0, 5):
2319 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2320 		break;
2321 	case IP_VERSION(5, 2, 0):
2322 	case IP_VERSION(5, 2, 2):
2323 	case IP_VERSION(5, 2, 4):
2324 	case IP_VERSION(5, 2, 5):
2325 	case IP_VERSION(5, 2, 6):
2326 	case IP_VERSION(5, 2, 3):
2327 	case IP_VERSION(5, 2, 1):
2328 	case IP_VERSION(5, 2, 7):
2329 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2330 		break;
2331 	case IP_VERSION(6, 0, 0):
2332 	case IP_VERSION(6, 0, 1):
2333 	case IP_VERSION(6, 0, 2):
2334 	case IP_VERSION(6, 0, 3):
2335 	case IP_VERSION(6, 1, 0):
2336 	case IP_VERSION(6, 1, 1):
2337 	case IP_VERSION(6, 1, 2):
2338 	case IP_VERSION(6, 1, 3):
2339 		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2340 		break;
2341 	case IP_VERSION(7, 0, 0):
2342 	case IP_VERSION(7, 0, 1):
2343 		amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
2344 		break;
2345 	default:
2346 		dev_err(adev->dev,
2347 			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2348 			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2349 		return -EINVAL;
2350 	}
2351 	return 0;
2352 }
2353 
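/*
 * Multimedia: a populated VCE entry indicates a pre-VCN ASIC with
 * separate UVD/VCE blocks; everything else uses VCN, with JPEG split out
 * into its own block from VCN 2.x onward.
 */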
2354 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2355 {
2356 	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2357 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2358 		case IP_VERSION(7, 0, 0):
2359 		case IP_VERSION(7, 2, 0):
2360 			/* UVD is not supported on vega20 SR-IOV */
2361 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2362 				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2363 			break;
2364 		default:
2365 			dev_err(adev->dev,
2366 				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2367 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2368 			return -EINVAL;
2369 		}
2370 		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2371 		case IP_VERSION(4, 0, 0):
2372 		case IP_VERSION(4, 1, 0):
2373 			/* VCE is not supported on vega20 SR-IOV */
2374 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2375 				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2376 			break;
2377 		default:
2378 			dev_err(adev->dev,
2379 				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2380 				amdgpu_ip_version(adev, VCE_HWIP, 0));
2381 			return -EINVAL;
2382 		}
2383 	} else {
2384 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2385 		case IP_VERSION(1, 0, 0):
2386 		case IP_VERSION(1, 0, 1):
2387 			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2388 			break;
2389 		case IP_VERSION(2, 0, 0):
2390 		case IP_VERSION(2, 0, 2):
2391 		case IP_VERSION(2, 2, 0):
2392 			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2393 			if (!amdgpu_sriov_vf(adev))
2394 				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2395 			break;
2396 		case IP_VERSION(2, 0, 3):
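			/* no VCN/JPEG block for VCN 2.0.3 */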
2397 			break;
2398 		case IP_VERSION(2, 5, 0):
2399 			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2400 			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2401 			break;
2402 		case IP_VERSION(2, 6, 0):
2403 			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2404 			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2405 			break;
2406 		case IP_VERSION(3, 0, 0):
2407 		case IP_VERSION(3, 0, 16):
2408 		case IP_VERSION(3, 1, 1):
2409 		case IP_VERSION(3, 1, 2):
2410 		case IP_VERSION(3, 0, 2):
2411 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2412 			if (!amdgpu_sriov_vf(adev))
2413 				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2414 			break;
2415 		case IP_VERSION(3, 0, 33):
2416 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2417 			break;
2418 		case IP_VERSION(4, 0, 0):
2419 		case IP_VERSION(4, 0, 2):
2420 		case IP_VERSION(4, 0, 4):
2421 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2422 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2423 			break;
2424 		case IP_VERSION(4, 0, 3):
2425 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2426 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2427 			break;
2428 		case IP_VERSION(4, 0, 5):
2429 		case IP_VERSION(4, 0, 6):
2430 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2431 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2432 			break;
2433 		case IP_VERSION(5, 0, 0):
2434 			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2435 			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
2436 			break;
2437 		case IP_VERSION(5, 0, 1):
2438 			amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
2439 			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
2440 			break;
2441 		default:
2442 			dev_err(adev->dev,
2443 				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2444 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2445 			return -EINVAL;
2446 		}
2447 	}
2448 	return 0;
2449 }
2450 
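/*
 * MES (MicroEngine Scheduler) is only present from GC 11 onward; older
 * parts fall through the default case and never enable it.
 */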
2451 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2452 {
2453 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2454 	case IP_VERSION(11, 0, 0):
2455 	case IP_VERSION(11, 0, 1):
2456 	case IP_VERSION(11, 0, 2):
2457 	case IP_VERSION(11, 0, 3):
2458 	case IP_VERSION(11, 0, 4):
2459 	case IP_VERSION(11, 5, 0):
2460 	case IP_VERSION(11, 5, 1):
2461 	case IP_VERSION(11, 5, 2):
2462 	case IP_VERSION(11, 5, 3):
2463 		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2464 		adev->enable_mes = true;
2465 		adev->enable_mes_kiq = true;
2466 		break;
2467 	case IP_VERSION(12, 0, 0):
2468 	case IP_VERSION(12, 0, 1):
2469 		amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
2470 		adev->enable_mes = true;
2471 		adev->enable_mes_kiq = true;
2472 		if (amdgpu_uni_mes)
2473 			adev->enable_uni_mes = true;
2474 		break;
2475 	default:
2476 		break;
2477 	}
2478 	return 0;
2479 }
2480 
2481 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2482 {
2483 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2484 	case IP_VERSION(9, 4, 3):
2485 	case IP_VERSION(9, 4, 4):
2486 	case IP_VERSION(9, 5, 0):
2487 		aqua_vanjaram_init_soc_config(adev);
2488 		break;
2489 	default:
2490 		break;
2491 	}
2492 }
2493 
2494 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2495 {
2496 	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2497 	case IP_VERSION(6, 1, 0):
2498 	case IP_VERSION(6, 1, 1):
2499 	case IP_VERSION(6, 1, 3):
2500 		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2501 		break;
2502 	default:
2503 		break;
2504 	}
2505 
2506 	return 0;
2507 }
2508 
2509 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2510 {
2511 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2512 	case IP_VERSION(4, 0, 5):
2513 	case IP_VERSION(4, 0, 6):
2514 		if (amdgpu_umsch_mm & 0x1) {
2515 			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2516 			adev->enable_umsch_mm = true;
2517 		}
2518 		break;
2519 	default:
2520 		break;
2521 	}
2522 
2523 	return 0;
2524 }
2525 
2526 static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
2527 {
2528 #if defined(CONFIG_DRM_AMD_ISP)
2529 	switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
2530 	case IP_VERSION(4, 1, 0):
2531 		amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
2532 		break;
2533 	case IP_VERSION(4, 1, 1):
2534 		amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
2535 		break;
2536 	default:
2537 		break;
2538 	}
2539 #endif
2540 
2541 	return 0;
2542 }
2543 
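/*
 * Top-level entry point: initialize register bases from the discovery
 * table (with hardcoded fallbacks for the pre-discovery SOC15 parts),
 * derive the chip family and per-IP callbacks, then register the IP
 * blocks roughly in init order: common, GMC, IH/PSP, SMU, display, GFX,
 * SDMA, multimedia, MES, VPE, UMSCH and ISP.
 */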
2544 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2545 {
2546 	int r;
2547 
2548 	switch (adev->asic_type) {
2549 	case CHIP_VEGA10:
2550 	case CHIP_VEGA12:
2551 	case CHIP_RAVEN:
2552 	case CHIP_VEGA20:
2553 	case CHIP_ARCTURUS:
2554 	case CHIP_ALDEBARAN:
2555 		/* This is not fatal.  There is a fallback below
2556 		 * if the new firmware is not present.  Some of
2557 		 * this will be overridden below to keep things
2558 		 * consistent with the current behavior.
2559 		 */
2560 		r = amdgpu_discovery_reg_base_init(adev);
2561 		if (!r) {
2562 			amdgpu_discovery_harvest_ip(adev);
2563 			amdgpu_discovery_get_gfx_info(adev);
2564 			amdgpu_discovery_get_mall_info(adev);
2565 			amdgpu_discovery_get_vcn_info(adev);
2566 		}
2567 		break;
2568 	default:
2569 		r = amdgpu_discovery_reg_base_init(adev);
2570 		if (r)
2571 			return -EINVAL;
2572 
2573 		amdgpu_discovery_harvest_ip(adev);
2574 		amdgpu_discovery_get_gfx_info(adev);
2575 		amdgpu_discovery_get_mall_info(adev);
2576 		amdgpu_discovery_get_vcn_info(adev);
2577 		break;
2578 	}
2579 
2580 	switch (adev->asic_type) {
2581 	case CHIP_VEGA10:
2582 		vega10_reg_base_init(adev);
2583 		adev->sdma.num_instances = 2;
2584 		adev->gmc.num_umc = 4;
2585 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2586 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2587 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2588 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2589 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2590 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2591 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2592 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2593 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2594 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2595 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2596 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2597 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2598 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2599 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2600 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2601 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2602 		break;
2603 	case CHIP_VEGA12:
2604 		vega10_reg_base_init(adev);
2605 		adev->sdma.num_instances = 2;
2606 		adev->gmc.num_umc = 4;
2607 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2608 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2609 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2610 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2611 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2612 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2613 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2614 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2615 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2616 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2617 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2618 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2619 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2620 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2621 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2622 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2623 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2624 		break;
2625 	case CHIP_RAVEN:
2626 		vega10_reg_base_init(adev);
2627 		adev->sdma.num_instances = 1;
2628 		adev->vcn.num_vcn_inst = 1;
2629 		adev->gmc.num_umc = 2;
2630 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2631 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2632 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2633 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2634 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2635 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2636 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2637 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2638 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2639 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2640 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2641 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2642 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2643 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2644 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2645 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2646 			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
2647 		} else {
2648 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2649 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2650 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2651 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2652 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2653 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2654 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2655 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2656 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2657 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2658 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2659 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2660 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2661 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2662 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2663 			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
2664 		}
2665 		break;
2666 	case CHIP_VEGA20:
2667 		vega20_reg_base_init(adev);
2668 		adev->sdma.num_instances = 2;
2669 		adev->gmc.num_umc = 8;
2670 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2671 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2672 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2673 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2674 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2675 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2676 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2677 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2678 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2679 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2680 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2681 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2682 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2683 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2684 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2685 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2686 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2687 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2688 		break;
2689 	case CHIP_ARCTURUS:
2690 		arct_reg_base_init(adev);
2691 		adev->sdma.num_instances = 8;
2692 		adev->vcn.num_vcn_inst = 2;
2693 		adev->gmc.num_umc = 8;
2694 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2695 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2696 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2697 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2698 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2699 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2700 		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2701 		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2702 		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2703 		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2704 		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2705 		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2706 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2707 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2708 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2709 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2710 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2711 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2712 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2713 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2714 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2715 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2716 		break;
2717 	case CHIP_ALDEBARAN:
2718 		aldebaran_reg_base_init(adev);
2719 		adev->sdma.num_instances = 5;
2720 		adev->vcn.num_vcn_inst = 2;
2721 		adev->gmc.num_umc = 4;
2722 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2723 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2724 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2725 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2726 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2727 		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2728 		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2729 		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2730 		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2731 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2732 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2733 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2734 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2735 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2736 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2737 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2738 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2739 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2740 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2741 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2742 		break;
2743 	default:
2744 		break;
2745 	}
2746 
2747 	amdgpu_discovery_init_soc_config(adev);
2748 	amdgpu_discovery_sysfs_init(adev);
2749 
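	/* map the GC IP version to a chip family */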
2750 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2751 	case IP_VERSION(9, 0, 1):
2752 	case IP_VERSION(9, 2, 1):
2753 	case IP_VERSION(9, 4, 0):
2754 	case IP_VERSION(9, 4, 1):
2755 	case IP_VERSION(9, 4, 2):
2756 	case IP_VERSION(9, 4, 3):
2757 	case IP_VERSION(9, 4, 4):
2758 	case IP_VERSION(9, 5, 0):
2759 		adev->family = AMDGPU_FAMILY_AI;
2760 		break;
2761 	case IP_VERSION(9, 1, 0):
2762 	case IP_VERSION(9, 2, 2):
2763 	case IP_VERSION(9, 3, 0):
2764 		adev->family = AMDGPU_FAMILY_RV;
2765 		break;
2766 	case IP_VERSION(10, 1, 10):
2767 	case IP_VERSION(10, 1, 1):
2768 	case IP_VERSION(10, 1, 2):
2769 	case IP_VERSION(10, 1, 3):
2770 	case IP_VERSION(10, 1, 4):
2771 	case IP_VERSION(10, 3, 0):
2772 	case IP_VERSION(10, 3, 2):
2773 	case IP_VERSION(10, 3, 4):
2774 	case IP_VERSION(10, 3, 5):
2775 		adev->family = AMDGPU_FAMILY_NV;
2776 		break;
2777 	case IP_VERSION(10, 3, 1):
2778 		adev->family = AMDGPU_FAMILY_VGH;
2779 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2780 		break;
2781 	case IP_VERSION(10, 3, 3):
2782 		adev->family = AMDGPU_FAMILY_YC;
2783 		break;
2784 	case IP_VERSION(10, 3, 6):
2785 		adev->family = AMDGPU_FAMILY_GC_10_3_6;
2786 		break;
2787 	case IP_VERSION(10, 3, 7):
2788 		adev->family = AMDGPU_FAMILY_GC_10_3_7;
2789 		break;
2790 	case IP_VERSION(11, 0, 0):
2791 	case IP_VERSION(11, 0, 2):
2792 	case IP_VERSION(11, 0, 3):
2793 		adev->family = AMDGPU_FAMILY_GC_11_0_0;
2794 		break;
2795 	case IP_VERSION(11, 0, 1):
2796 	case IP_VERSION(11, 0, 4):
2797 		adev->family = AMDGPU_FAMILY_GC_11_0_1;
2798 		break;
2799 	case IP_VERSION(11, 5, 0):
2800 	case IP_VERSION(11, 5, 1):
2801 	case IP_VERSION(11, 5, 2):
2802 	case IP_VERSION(11, 5, 3):
2803 		adev->family = AMDGPU_FAMILY_GC_11_5_0;
2804 		break;
2805 	case IP_VERSION(12, 0, 0):
2806 	case IP_VERSION(12, 0, 1):
2807 		adev->family = AMDGPU_FAMILY_GC_12_0_0;
2808 		break;
2809 	default:
2810 		return -EINVAL;
2811 	}
2812 
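	/* flag APUs based on the GC IP version */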
2813 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2814 	case IP_VERSION(9, 1, 0):
2815 	case IP_VERSION(9, 2, 2):
2816 	case IP_VERSION(9, 3, 0):
2817 	case IP_VERSION(10, 1, 3):
2818 	case IP_VERSION(10, 1, 4):
2819 	case IP_VERSION(10, 3, 1):
2820 	case IP_VERSION(10, 3, 3):
2821 	case IP_VERSION(10, 3, 6):
2822 	case IP_VERSION(10, 3, 7):
2823 	case IP_VERSION(11, 0, 1):
2824 	case IP_VERSION(11, 0, 4):
2825 	case IP_VERSION(11, 5, 0):
2826 	case IP_VERSION(11, 5, 1):
2827 	case IP_VERSION(11, 5, 2):
2828 	case IP_VERSION(11, 5, 3):
2829 		adev->flags |= AMD_IS_APU;
2830 		break;
2831 	default:
2832 		break;
2833 	}
2834 
2835 	/* set NBIO callbacks based on the NBIO IP version */
2836 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2837 	case IP_VERSION(6, 1, 0):
2838 	case IP_VERSION(6, 2, 0):
2839 		adev->nbio.funcs = &nbio_v6_1_funcs;
2840 		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2841 		break;
2842 	case IP_VERSION(7, 0, 0):
2843 	case IP_VERSION(7, 0, 1):
2844 	case IP_VERSION(2, 5, 0):
2845 		adev->nbio.funcs = &nbio_v7_0_funcs;
2846 		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2847 		break;
2848 	case IP_VERSION(7, 4, 0):
2849 	case IP_VERSION(7, 4, 1):
2850 	case IP_VERSION(7, 4, 4):
2851 		adev->nbio.funcs = &nbio_v7_4_funcs;
2852 		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2853 		break;
2854 	case IP_VERSION(7, 9, 0):
2855 	case IP_VERSION(7, 9, 1):
2856 		adev->nbio.funcs = &nbio_v7_9_funcs;
2857 		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2858 		break;
2859 	case IP_VERSION(7, 11, 0):
2860 	case IP_VERSION(7, 11, 1):
2861 	case IP_VERSION(7, 11, 2):
2862 	case IP_VERSION(7, 11, 3):
2863 		adev->nbio.funcs = &nbio_v7_11_funcs;
2864 		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2865 		break;
2866 	case IP_VERSION(7, 2, 0):
2867 	case IP_VERSION(7, 2, 1):
2868 	case IP_VERSION(7, 3, 0):
2869 	case IP_VERSION(7, 5, 0):
2870 	case IP_VERSION(7, 5, 1):
2871 		adev->nbio.funcs = &nbio_v7_2_funcs;
2872 		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2873 		break;
2874 	case IP_VERSION(2, 1, 1):
2875 	case IP_VERSION(2, 3, 0):
2876 	case IP_VERSION(2, 3, 1):
2877 	case IP_VERSION(2, 3, 2):
2878 	case IP_VERSION(3, 3, 0):
2879 	case IP_VERSION(3, 3, 1):
2880 	case IP_VERSION(3, 3, 2):
2881 	case IP_VERSION(3, 3, 3):
2882 		adev->nbio.funcs = &nbio_v2_3_funcs;
2883 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2884 		break;
2885 	case IP_VERSION(4, 3, 0):
2886 	case IP_VERSION(4, 3, 1):
2887 		if (amdgpu_sriov_vf(adev))
2888 			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2889 		else
2890 			adev->nbio.funcs = &nbio_v4_3_funcs;
2891 		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2892 		break;
2893 	case IP_VERSION(7, 7, 0):
2894 	case IP_VERSION(7, 7, 1):
2895 		adev->nbio.funcs = &nbio_v7_7_funcs;
2896 		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2897 		break;
2898 	case IP_VERSION(6, 3, 1):
2899 		adev->nbio.funcs = &nbif_v6_3_1_funcs;
2900 		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
2901 		break;
2902 	default:
2903 		break;
2904 	}
2905 
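	/* set HDP callbacks based on the HDP IP version */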
2906 	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2907 	case IP_VERSION(4, 0, 0):
2908 	case IP_VERSION(4, 0, 1):
2909 	case IP_VERSION(4, 1, 0):
2910 	case IP_VERSION(4, 1, 1):
2911 	case IP_VERSION(4, 1, 2):
2912 	case IP_VERSION(4, 2, 0):
2913 	case IP_VERSION(4, 2, 1):
2914 	case IP_VERSION(4, 4, 0):
2915 	case IP_VERSION(4, 4, 2):
2916 	case IP_VERSION(4, 4, 5):
2917 		adev->hdp.funcs = &hdp_v4_0_funcs;
2918 		break;
2919 	case IP_VERSION(5, 0, 0):
2920 	case IP_VERSION(5, 0, 1):
2921 	case IP_VERSION(5, 0, 2):
2922 	case IP_VERSION(5, 0, 3):
2923 	case IP_VERSION(5, 0, 4):
2924 	case IP_VERSION(5, 2, 0):
2925 		adev->hdp.funcs = &hdp_v5_0_funcs;
2926 		break;
2927 	case IP_VERSION(5, 2, 1):
2928 		adev->hdp.funcs = &hdp_v5_2_funcs;
2929 		break;
2930 	case IP_VERSION(6, 0, 0):
2931 	case IP_VERSION(6, 0, 1):
2932 	case IP_VERSION(6, 1, 0):
2933 		adev->hdp.funcs = &hdp_v6_0_funcs;
2934 		break;
2935 	case IP_VERSION(7, 0, 0):
2936 		adev->hdp.funcs = &hdp_v7_0_funcs;
2937 		break;
2938 	default:
2939 		break;
2940 	}
2941 
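	/* set DF callbacks based on the DF IP version */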
2942 	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
2943 	case IP_VERSION(3, 6, 0):
2944 	case IP_VERSION(3, 6, 1):
2945 	case IP_VERSION(3, 6, 2):
2946 		adev->df.funcs = &df_v3_6_funcs;
2947 		break;
2948 	case IP_VERSION(2, 1, 0):
2949 	case IP_VERSION(2, 1, 1):
2950 	case IP_VERSION(2, 5, 0):
2951 	case IP_VERSION(3, 5, 1):
2952 	case IP_VERSION(3, 5, 2):
2953 		adev->df.funcs = &df_v1_7_funcs;
2954 		break;
2955 	case IP_VERSION(4, 3, 0):
2956 		adev->df.funcs = &df_v4_3_funcs;
2957 		break;
2958 	case IP_VERSION(4, 6, 2):
2959 		adev->df.funcs = &df_v4_6_2_funcs;
2960 		break;
2961 	case IP_VERSION(4, 15, 0):
2962 	case IP_VERSION(4, 15, 1):
2963 		adev->df.funcs = &df_v4_15_funcs;
2964 		break;
2965 	default:
2966 		break;
2967 	}
2968 
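	/* set SMUIO callbacks based on the SMUIO IP version */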
2969 	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
2970 	case IP_VERSION(9, 0, 0):
2971 	case IP_VERSION(9, 0, 1):
2972 	case IP_VERSION(10, 0, 0):
2973 	case IP_VERSION(10, 0, 1):
2974 	case IP_VERSION(10, 0, 2):
2975 		adev->smuio.funcs = &smuio_v9_0_funcs;
2976 		break;
2977 	case IP_VERSION(11, 0, 0):
2978 	case IP_VERSION(11, 0, 2):
2979 	case IP_VERSION(11, 0, 3):
2980 	case IP_VERSION(11, 0, 4):
2981 	case IP_VERSION(11, 0, 7):
2982 	case IP_VERSION(11, 0, 8):
2983 		adev->smuio.funcs = &smuio_v11_0_funcs;
2984 		break;
2985 	case IP_VERSION(11, 0, 6):
2986 	case IP_VERSION(11, 0, 10):
2987 	case IP_VERSION(11, 0, 11):
2988 	case IP_VERSION(11, 5, 0):
2989 	case IP_VERSION(11, 5, 2):
2990 	case IP_VERSION(13, 0, 1):
2991 	case IP_VERSION(13, 0, 9):
2992 	case IP_VERSION(13, 0, 10):
2993 		adev->smuio.funcs = &smuio_v11_0_6_funcs;
2994 		break;
2995 	case IP_VERSION(13, 0, 2):
2996 		adev->smuio.funcs = &smuio_v13_0_funcs;
2997 		break;
2998 	case IP_VERSION(13, 0, 3):
2999 	case IP_VERSION(13, 0, 11):
3000 		adev->smuio.funcs = &smuio_v13_0_3_funcs;
3001 		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
3002 			adev->flags |= AMD_IS_APU;
3004 		break;
3005 	case IP_VERSION(13, 0, 6):
3006 	case IP_VERSION(13, 0, 8):
3007 	case IP_VERSION(14, 0, 0):
3008 	case IP_VERSION(14, 0, 1):
3009 		adev->smuio.funcs = &smuio_v13_0_6_funcs;
3010 		break;
3011 	case IP_VERSION(14, 0, 2):
3012 		adev->smuio.funcs = &smuio_v14_0_2_funcs;
3013 		break;
3014 	default:
3015 		break;
3016 	}
3017 
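	/* set LSDMA callbacks based on the LSDMA IP version */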
3018 	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
3019 	case IP_VERSION(6, 0, 0):
3020 	case IP_VERSION(6, 0, 1):
3021 	case IP_VERSION(6, 0, 2):
3022 	case IP_VERSION(6, 0, 3):
3023 		adev->lsdma.funcs = &lsdma_v6_0_funcs;
3024 		break;
3025 	case IP_VERSION(7, 0, 0):
3026 	case IP_VERSION(7, 0, 1):
3027 		adev->lsdma.funcs = &lsdma_v7_0_funcs;
3028 		break;
3029 	default:
3030 		break;
3031 	}
3032 
3033 	r = amdgpu_discovery_set_common_ip_blocks(adev);
3034 	if (r)
3035 		return r;
3036 
3037 	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
3038 	if (r)
3039 		return r;
3040 
3041 	/* For SR-IOV, PSP needs to be initialized before IH */
3042 	if (amdgpu_sriov_vf(adev)) {
3043 		r = amdgpu_discovery_set_psp_ip_blocks(adev);
3044 		if (r)
3045 			return r;
3046 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
3047 		if (r)
3048 			return r;
3049 	} else {
3050 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
3051 		if (r)
3052 			return r;
3053 
3054 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
3055 			r = amdgpu_discovery_set_psp_ip_blocks(adev);
3056 			if (r)
3057 				return r;
3058 		}
3059 	}
3060 
3061 	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
3062 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
3063 		if (r)
3064 			return r;
3065 	}
3066 
3067 	r = amdgpu_discovery_set_display_ip_blocks(adev);
3068 	if (r)
3069 		return r;
3070 
3071 	r = amdgpu_discovery_set_gc_ip_blocks(adev);
3072 	if (r)
3073 		return r;
3074 
3075 	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
3076 	if (r)
3077 		return r;
3078 
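	/* for direct and RLC-backdoor firmware loading, add the SMU block after SDMA */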
3079 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
3080 	     !amdgpu_sriov_vf(adev)) ||
3081 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
3082 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
3083 		if (r)
3084 			return r;
3085 	}
3086 
3087 	r = amdgpu_discovery_set_mm_ip_blocks(adev);
3088 	if (r)
3089 		return r;
3090 
3091 	r = amdgpu_discovery_set_mes_ip_blocks(adev);
3092 	if (r)
3093 		return r;
3094 
3095 	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
3096 	if (r)
3097 		return r;
3098 
3099 	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
3100 	if (r)
3101 		return r;
3102 
3103 	r = amdgpu_discovery_set_isp_ip_blocks(adev);
3104 	if (r)
3105 		return r;
3106 	return 0;
3107 }
3108 
3109