/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
#include "amdgpu_ras.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_3.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "df_v4_6_2.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "nbio_v7_9.h"
#include "nbio_v7_11.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "nv.h"
#include "soc21.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "lsdma_v6_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "vcn_v4_0_3.h"
#include "jpeg_v4_0_3.h"
#include "vcn_v4_0_5.h"
#include "jpeg_v4_0_5.h"
#include "amdgpu_vkms.h"
#include "mes_v10_1.h"
#include "mes_v11_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"

#include "amdgpu_vpe.h"

#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);

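/* MMIO register offsets below are in dword units.  RCC_CONFIG_MEMSIZE
 * reports the VRAM size in MB (shifted up by 20 when read below), and
 * MP0_SMN_C2PMSG_33 is polled for IFWI init completion.  MM_INDEX /
 * MM_INDEX_HI / MM_DATA are the usual index/data pair for indirect MMIO
 * access.
 */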
#define mmIP_DISCOVERY_VERSION  0x16A00
#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMP0_SMN_C2PMSG_33	0x16061
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID]		= "MP1",
	[MP2_HWID]		= "MP2",
	[THM_HWID]		= "THM",
	[SMUIO_HWID]		= "SMUIO",
	[FUSE_HWID]		= "FUSE",
	[CLKA_HWID]		= "CLKA",
	[PWR_HWID]		= "PWR",
	[GC_HWID]		= "GC",
	[UVD_HWID]		= "UVD",
	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
	[ACP_HWID]		= "ACP",
	[DCI_HWID]		= "DCI",
	[DMU_HWID]		= "DMU",
	[DCO_HWID]		= "DCO",
	[DIO_HWID]		= "DIO",
	[XDMA_HWID]		= "XDMA",
	[DCEAZ_HWID]		= "DCEAZ",
	[DAZ_HWID]		= "DAZ",
	[SDPMUX_HWID]		= "SDPMUX",
	[NTB_HWID]		= "NTB",
	[IOHC_HWID]		= "IOHC",
	[L2IMU_HWID]		= "L2IMU",
	[VCE_HWID]		= "VCE",
	[MMHUB_HWID]		= "MMHUB",
	[ATHUB_HWID]		= "ATHUB",
	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
	[DFX_HWID]		= "DFX",
	[DBGU0_HWID]		= "DBGU0",
	[DBGU1_HWID]		= "DBGU1",
	[OSSSYS_HWID]		= "OSSSYS",
	[HDP_HWID]		= "HDP",
	[SDMA0_HWID]		= "SDMA0",
	[SDMA1_HWID]		= "SDMA1",
	[SDMA2_HWID]		= "SDMA2",
	[SDMA3_HWID]		= "SDMA3",
	[LSDMA_HWID]		= "LSDMA",
	[ISP_HWID]		= "ISP",
	[DBGU_IO_HWID]		= "DBGU_IO",
	[DF_HWID]		= "DF",
	[CLKB_HWID]		= "CLKB",
	[FCH_HWID]		= "FCH",
	[DFX_DAP_HWID]		= "DFX_DAP",
	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
	[L1IMU3_HWID]		= "L1IMU3",
	[L1IMU4_HWID]		= "L1IMU4",
	[L1IMU5_HWID]		= "L1IMU5",
	[L1IMU6_HWID]		= "L1IMU6",
	[L1IMU7_HWID]		= "L1IMU7",
	[L1IMU8_HWID]		= "L1IMU8",
	[L1IMU9_HWID]		= "L1IMU9",
	[L1IMU10_HWID]		= "L1IMU10",
	[L1IMU11_HWID]		= "L1IMU11",
	[L1IMU12_HWID]		= "L1IMU12",
	[L1IMU13_HWID]		= "L1IMU13",
	[L1IMU14_HWID]		= "L1IMU14",
	[L1IMU15_HWID]		= "L1IMU15",
	[WAFLC_HWID]		= "WAFLC",
	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
	[PCIE_HWID]		= "PCIE",
	[PCS_HWID]		= "PCS",
	[DDCL_HWID]		= "DDCL",
	[SST_HWID]		= "SST",
	[IOAGR_HWID]		= "IOAGR",
	[NBIF_HWID]		= "NBIF",
	[IOAPIC_HWID]		= "IOAPIC",
	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
	[NTBCCP_HWID]		= "NTBCCP",
	[UMC_HWID]		= "UMC",
	[SATA_HWID]		= "SATA",
	[USB_HWID]		= "USB",
	[CCXSEC_HWID]		= "CCXSEC",
	[XGMI_HWID]		= "XGMI",
	[XGBE_HWID]		= "XGBE",
	[MP0_HWID]		= "MP0",
	[VPE_HWID]		= "VPE",
};

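/* Map the driver's HWIP enumeration (used to index adev->reg_offset and
 * adev->ip_versions) onto the HW_ID values found in the discovery table.
 */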
static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP]	= GC_HWID,
	[HDP_HWIP]	= HDP_HWID,
	[SDMA0_HWIP]	= SDMA0_HWID,
	[SDMA1_HWIP]	= SDMA1_HWID,
	[SDMA2_HWIP]	= SDMA2_HWID,
	[SDMA3_HWIP]	= SDMA3_HWID,
	[LSDMA_HWIP]	= LSDMA_HWID,
	[MMHUB_HWIP]	= MMHUB_HWID,
	[ATHUB_HWIP]	= ATHUB_HWID,
	[NBIO_HWIP]	= NBIF_HWID,
	[MP0_HWIP]	= MP0_HWID,
	[MP1_HWIP]	= MP1_HWID,
	[UVD_HWIP]	= UVD_HWID,
	[VCE_HWIP]	= VCE_HWID,
	[DF_HWIP]	= DF_HWID,
	[DCE_HWIP]	= DMU_HWID,
	[OSSSYS_HWIP]	= OSSSYS_HWID,
	[SMUIO_HWIP]	= SMUIO_HWID,
	[PWR_HWIP]	= PWR_HWID,
	[NBIF_HWIP]	= NBIF_HWID,
	[THM_HWIP]	= THM_HWID,
	[CLK_HWIP]	= CLKA_HWID,
	[UMC_HWIP]	= UMC_HWID,
	[XGMI_HWIP]	= XGMI_HWID,
	[DCI_HWIP]	= DCI_HWID,
	[PCIE_HWIP]	= PCIE_HWID,
	[VPE_HWIP]	= VPE_HWID,
};

static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
{
	u64 tmr_offset, tmr_size, pos;
	void *discv_regn;
	int ret;

	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
	if (ret)
		return ret;

	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;

	/* This region is read-only and reserved from system use */
	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
	if (discv_regn) {
		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
		memunmap(discv_regn);
		return 0;
	}

	return -ENOENT;
}

static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
{
	uint64_t vram_size;
	u32 msg;
	int i, ret = 0;

	/* It can take up to a second for IFWI init to complete on some dGPUs,
	 * but generally it should be in the 60-100ms range.  Normally this
	 * starts as soon as the device gets power, so by the time the OS
	 * loads, it has long since completed.  However, when a card is
	 * hotplugged (e.g., via USB4), we need to wait for the init to
	 * complete.  Once the C2PMSG is updated, we can continue.
	 */
	if (dev_is_removable(&adev->pdev->dev)) {
		for (i = 0; i < 1000; i++) {
			msg = RREG32(mmMP0_SMN_C2PMSG_33);
			if (msg & 0x80000000)
				break;
			msleep(1);
		}
	}
	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;

	if (vram_size) {
		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
					  adev->mman.discovery_tmr_size, false);
	} else {
		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
	}

	return ret;
}

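/* With amdgpu_discovery == 2 the blob comes from the firmware loader rather
 * than from the device itself; in practice that typically means booting with
 * the amdgpu.discovery=2 module parameter and providing the blob as
 * amdgpu/ip_discovery.bin in the firmware search path (see
 * FIRMWARE_IP_DISCOVERY above).
 */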
static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
{
	const struct firmware *fw;
	const char *fw_name;
	int r;

	switch (amdgpu_discovery) {
	case 2:
		fw_name = FIRMWARE_IP_DISCOVERY;
		break;
	default:
		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
		return -EINVAL;
	}

	r = request_firmware(&fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
	release_firmware(fw);

	return 0;
}

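/* The discovery checksums are plain byte-wise sums truncated to 16 bits:
 * e.g. the bytes { 0x01, 0x02, 0xFF } sum to a checksum of 0x0102.
 */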
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;

	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only to those Navy Flounder boards that
	 * ship a bad VCN config in their harvest table.
	 */
	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

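/* Fetch the discovery blob, either from the device or from a file, and
 * validate it: the binary signature and whole-binary checksum first, then
 * the signature/table id and checksum of each table_list entry that is
 * present (IP_DISCOVERY, GC, HARVEST_INFO, VCN_INFO, MALL_INFO).
 */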
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
	if (!adev->mman.discovery_bin)
		return -ENOMEM;

	/* Read from file if it is the preferred option */
	if (amdgpu_discovery == 2) {
		dev_info(adev->dev, "use ip discovery information from file\n");
		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
		if (r) {
			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
			r = -EINVAL;
			goto out;
		}
	} else {
		r = amdgpu_discovery_read_binary_from_mem(
			adev, adev->mman.discovery_bin);
		if (r)
			goto out;
	}

	/* check the ip discovery binary signature */
	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
		dev_err(adev->dev,
			"invalid ip discovery binary signature\n");
		r = -EINVAL;
		goto out;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      size, checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le16_to_cpu(ihdr->size), checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(ghdr->size), checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	/* MALL table checksum verification is currently disabled (the "0 &&"
	 * below short-circuits the check); the table itself is still parsed
	 * later by amdgpu_discovery_get_mall_info().
	 */
	if (0 && offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
	if ((amdgpu_discovery != 2) &&
	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
		amdgpu_ras_query_boot_status(adev, 4);
	return r;
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}

static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
{
	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
			  ip->instance_number);
		return -EINVAL;
	}
	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
			  le16_to_cpu(ip->hw_id));
		return -EINVAL;
	}

	return 0;
}

static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						uint32_t *vcn_harvest_count)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip_v4 *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	int i, j;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			if (le16_to_cpu(ip->variant) == 1) {
				switch (le16_to_cpu(ip->hw_id)) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (ip->instance_number == 0) {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
					} else {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
					}
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
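			/* IP entries are variably sized: the trailing base
			 * address array holds u64s or u32s depending on
			 * ihdr->base_addr_64_bit, so advance accordingly.
			 */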
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}
}

static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;
	uint32_t umc_harvest_config = 0;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			adev->vcn.harvest_config |=
				(1 << harvest_info->list[i].number_instance);
			adev->jpeg.harvest_config |=
				(1 << harvest_info->list[i].number_instance);

			adev->vcn.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			adev->jpeg.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			umc_harvest_config |=
				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
			(*umc_harvest_count)++;
			break;
		case GC_HWID:
			adev->gfx.xcc_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case SDMA0_HWID:
			adev->sdma.sdma_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		default:
			break;
		}
	}

	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
				~umc_harvest_config;
}

/* ================================================== */

struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8  num_instance;
	u8  major, minor, revision;
	u8  harvest;

	int num_base_addresses;
	u32 base_addr[] __counted_by(num_base_addresses);
};

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Each line is "0x%08X\n": 11 characters plus the NUL
		 * terminator, so stop once at + 12 would exceed PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static const struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static const struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static const struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static const struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static const struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	adev->ip_top = NULL;
	kfree(ip_top);
}

static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
{
	uint8_t harvest = 0;

	/* Until a uniform scheme is settled on, derive the mask from hw_id */
	switch (hw_id) {
	case VCN_HWID:
		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
		break;
	case DMU_HWID:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
			harvest = 0x1;
		break;
	case UMC_HWID:
		/* TODO: UMC harvesting needs separate parsing; ignore for now. */
	case GC_HWID:
		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
		break;
	case SDMA0_HWID:
		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
		break;
	default:
		break;
	}

	return harvest;
}

static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
{
	int ii, jj, kk, res;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip_v4 *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
			if (amdgpu_discovery_validate_ip(ip) ||
			    le16_to_cpu(ip->hw_id) != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->instance_number;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest =
				amdgpu_discovery_get_harvest_info(
					adev, ip_hw_instance->hw_id,
					ip_hw_instance->num_instance);
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
				if (reg_base_64)
					ip_hw_instance->base_addr[kk] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
				else
					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
			}

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			if (reg_base_64)
				ip_offset += struct_size(ip, base_address_64,
							 ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address,
							 ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &adev->ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
	}

	return 0;
}

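/* Build the ip_discovery tree under the device's sysfs kobject.  The
 * resulting layout looks like this (card0 path purely illustrative, for a
 * hypothetical single-die GPU):
 *
 *   /sys/class/drm/card0/device/ip_discovery/die/0/num_ips
 *   /sys/class/drm/card0/device/ip_discovery/die/0/GC/0/{hw_id,num_instance,
 *           major,minor,revision,harvest,num_base_addresses,base_addr}
 *
 * where "GC" is a symlink to the numeric hw_id directory and the "0" after
 * it is the instance number.
 */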
static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	struct kset *die_kset;
	int res, ii;

	if (!adev->mman.discovery_bin)
		return -EINVAL;

	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;

	adev->ip_top->adev = adev;

	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &adev->ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &adev->ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&adev->ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&adev->ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &adev->ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&adev->ip_top->die_kset.kobj);
	kobject_put(&adev->ip_top->kobj);
}

/* ================================================== */

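/* Walk every die and IP entry in the discovery table: count VCN/SDMA/UMC/GC
 * instances and build their instance masks, convert base addresses to CPU
 * byte order in place, and populate adev->reg_offset[hw_ip][instance] and
 * adev->ip_versions[][] through the hw_id_map translation.
 */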
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	uint8_t num_base_address, subrev, variant;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip_v4 *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_discovery_init failed\n");
		return r;
	}

	adev->gfx.xcc_mask = 0;
	adev->sdma.sdma_mask = 0;
	adev->vcn.inst_mask = 0;
	adev->jpeg.inst_mask = 0;
	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
					le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
				le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->instance_number,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				if (adev->vcn.num_vcn_inst <
				    AMDGPU_MAX_VCN_INSTANCES) {
					adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
						ip->revision & 0xc0;
					adev->vcn.num_vcn_inst++;
					adev->vcn.inst_mask |=
						(1U << ip->instance_number);
					adev->jpeg.inst_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
				}
				ip->revision &= ~0xc0;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances <
				    AMDGPU_MAX_SDMA_INSTANCES) {
					adev->sdma.num_instances++;
					adev->sdma.sdma_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
				}
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
				adev->gmc.num_umc++;
				adev->umc.node_inst_num++;
			}

			if (le16_to_cpu(ip->hw_id) == GC_HWID)
				adev->gfx.xcc_mask |=
					(1U << ip->instance_number);

			for (k = 0; k < num_base_address; k++) {
				/*
				 * Convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				if (ihdr->base_addr_64_bit)
					/* Truncate the 64bit base address from ip discovery
					 * and only store the lower 32bit ip base in reg_offset[].
					 * Bits > 32 follow an ASIC-specific format, thus just
					 * discard them and handle them within each specific ASIC.
					 * This way reg_offset[] and related helpers can
					 * stay unchanged.
					 * The base address is in dwords, thus clear the
					 * highest 2 bits to store.
					 */
					ip->base_address[k] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
				else
					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
				    hw_id_map[hw_ip] != 0) {
					DRM_DEBUG("set register base offset for %s\n",
							hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->instance_number] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example.  Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
					 * example.  On most chips there are multiple instances
					 * with the same HWID.
					 */

					if (ihdr->version < 3) {
						subrev = 0;
						variant = 0;
					} else {
						subrev = ip->sub_revision;
						variant = ip->variant;
					}

					adev->ip_versions[hw_ip]
							 [ip->instance_number] =
						IP_VERSION_FULL(ip->major,
								ip->minor,
								ip->revision,
								variant,
								subrev);
				}
			}

next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return 0;
}

static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;

	/*
	 * The harvest table does not apply to Navi1x and legacy GPUs, so on
	 * those read the harvest bit from each IP data structure to set the
	 * harvest configuration instead.
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) {
		if ((adev->pdev->device == 0x731E &&
			(adev->pdev->revision == 0xC6 ||
			 adev->pdev->revision == 0xC7)) ||
			(adev->pdev->device == 0x7340 &&
			 adev->pdev->revision == 0xC9) ||
			(adev->pdev->device == 0x7360 &&
			 adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
				&vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc)
		adev->gmc.num_umc -= umc_harvest_count;
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v2_0 v2;
	struct gc_info_v2_1 v2_1;
};

static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union mall_info {
	struct mall_info_v1_0 v1;
	struct mall_info_v2_0 v2;
};

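/* Worked example for the v1 math below (illustrative numbers): with per-UMC
 * MALL size S and four UMCs, where UMC0 is set in m_s_present and UMC1 in
 * m_half_use, the total is 2S + S/2 + S + S = 4.5 * S.
 */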
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		adev->gmc.m_half_use = half_use;
		break;
	case 2:
		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
		adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union vcn_info {
	struct vcn_info_v1_0 v1;
};

static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union vcn_info *vcn_info;
	u16 offset;
	int v;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
	 * but that may change in the future with new GPUs so keep this
	 * check for defensive purposes.
	 */
	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
		dev_err(adev->dev, "invalid vcn instances\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);

	if (!offset)
		return 0;

	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
	case 1:
		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
		 * so this won't overflow.
		 */
		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
			adev->vcn.vcn_codec_disable_mask[v] =
				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled VCN info table %d.%d\n",
			le16_to_cpu(vcn_info->v1.header.version_major),
			le16_to_cpu(vcn_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
	/* Select the common (SoC) IP block based on the GC IP version */
1648 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1649 	case IP_VERSION(9, 0, 1):
1650 	case IP_VERSION(9, 1, 0):
1651 	case IP_VERSION(9, 2, 1):
1652 	case IP_VERSION(9, 2, 2):
1653 	case IP_VERSION(9, 3, 0):
1654 	case IP_VERSION(9, 4, 0):
1655 	case IP_VERSION(9, 4, 1):
1656 	case IP_VERSION(9, 4, 2):
1657 	case IP_VERSION(9, 4, 3):
1658 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1659 		break;
1660 	case IP_VERSION(10, 1, 10):
1661 	case IP_VERSION(10, 1, 1):
1662 	case IP_VERSION(10, 1, 2):
1663 	case IP_VERSION(10, 1, 3):
1664 	case IP_VERSION(10, 1, 4):
1665 	case IP_VERSION(10, 3, 0):
1666 	case IP_VERSION(10, 3, 1):
1667 	case IP_VERSION(10, 3, 2):
1668 	case IP_VERSION(10, 3, 3):
1669 	case IP_VERSION(10, 3, 4):
1670 	case IP_VERSION(10, 3, 5):
1671 	case IP_VERSION(10, 3, 6):
1672 	case IP_VERSION(10, 3, 7):
1673 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1674 		break;
1675 	case IP_VERSION(11, 0, 0):
1676 	case IP_VERSION(11, 0, 1):
1677 	case IP_VERSION(11, 0, 2):
1678 	case IP_VERSION(11, 0, 3):
1679 	case IP_VERSION(11, 0, 4):
1680 	case IP_VERSION(11, 5, 0):
1681 		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1682 		break;
1683 	default:
1684 		dev_err(adev->dev,
1685 			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1686 			amdgpu_ip_version(adev, GC_HWIP, 0));
1687 		return -EINVAL;
1688 	}
1689 	return 0;
1690 }
1691 
static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
	/* use GC or MMHUB IP version */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev, "Failed to add gmc ip block (GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

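/* Register the interrupt handler (IH) IP block based on the OSSSYS IP
 * version.
 */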
static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 3, 0):
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		break;
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
		break;
	case IP_VERSION(6, 1, 0):
		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add ih ip block (OSSSYS_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

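/* Register the platform security processor (PSP) IP block based on the
 * MP0 IP version.
 */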
static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	case IP_VERSION(13, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block (MP0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP0_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

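/* Register the SMU IP block (legacy powerplay or swSMU) based on the
 * MP1 IP version.
 */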
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	case IP_VERSION(14, 0, 0):
		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block (MP1_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP1_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

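/* Under SR-IOV, display is handled as a virtual display through the
 * VKMS IP block.
 */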
#if defined(CONFIG_DRM_AMD_DC)
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);
	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
#endif

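/* Register the display IP block: VKMS for virtual display, otherwise
 * the DC-based DM block keyed off the DCE or DCI IP version.
 */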
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
		case IP_VERSION(3, 5, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block (DCE_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, DCE_HWIP, 0));
			return -EINVAL;
		}
	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block (DCI_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, DCI_HWIP, 0));
			return -EINVAL;
		}
	}
#endif
	return 0;
}

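/* Register the graphics/compute (GFX) IP block based on the GC IP
 * version.
 */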
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(9, 4, 3):
		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev, "Failed to add gfx ip block (GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

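/* Register the system DMA (SDMA) IP block based on the SDMA0 IP
 * version.
 */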
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(4, 4, 2):
		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
	case IP_VERSION(6, 1, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block (SDMA0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

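/* Register the multimedia IP blocks: UVD/VCE on pre-VCN parts (where a
 * VCE IP version is present), otherwise VCN plus JPEG where applicable.
 */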
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add UVD v7 ip block (UVD_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, UVD_HWIP, 0));
			return -EINVAL;
		}
		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block (VCE_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, VCE_HWIP, 0));
			return -EINVAL;
		}
	} else {
		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			/* no VCN/JPEG blocks are added for this IP version */
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 2):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		case IP_VERSION(4, 0, 3):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
			break;
		case IP_VERSION(4, 0, 5):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block (UVD_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, UVD_HWIP, 0));
			return -EINVAL;
		}
	}
	return 0;
}

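/* Register the micro engine scheduler (MES) IP block; opt-in via the
 * amdgpu_mes module parameter on GFX10, always enabled on GFX11.
 */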
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
		if (amdgpu_mes) {
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
			adev->enable_mes = true;
			if (amdgpu_mes_kiq)
				adev->enable_mes_kiq = true;
		}
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	default:
		break;
	}
	return 0;
}

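/* Apply SoC-specific configuration; currently only needed for
 * aqua vanjaram (GC 9.4.3).
 */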
static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
		aqua_vanjaram_init_soc_config(adev);
		break;
	default:
		break;
	}
}

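/* Register the video processing engine (VPE) IP block. */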
static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
		break;
	default:
		break;
	}

	return 0;
}

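/* Register the user mode scheduler for multimedia (UMSCH_MM) IP block;
 * enablement is gated by bit 0 of the amdgpu_umsch_mm module parameter.
 */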
static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(4, 0, 5):
		if (amdgpu_umsch_mm & 0x1) {
			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
			adev->enable_umsch_mm = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

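/* Top-level entry point: determine IP versions (from hardcoded tables
 * for pre-discovery ASICs, otherwise from the IP discovery binary) and
 * register all IP blocks for the device.
 */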
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r)
			return -EINVAL;

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

	amdgpu_discovery_init_soc_config(adev);
	amdgpu_discovery_sysfs_init(adev);

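	/* map the GC IP version to a chip family */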
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	case IP_VERSION(11, 5, 0):
		adev->family = AMDGPU_FAMILY_GC_11_5_0;
		break;
	default:
		return -EINVAL;
	}

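	/* mark APUs based on the GC IP version */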
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
		adev->gmc.xgmi.supported = true;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);

	/* set NBIO version */
	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 9, 0):
		adev->nbio.funcs = &nbio_v7_9_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
		break;
	case IP_VERSION(7, 11, 0):
		adev->nbio.funcs = &nbio_v7_11_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	default:
		break;
	}

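	/* set HDP version */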
	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 1, 0):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	default:
		break;
	}

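	/* set DF version */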
	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	case IP_VERSION(4, 3, 0):
		adev->df.funcs = &df_v4_3_funcs;
		break;
	case IP_VERSION(4, 6, 2):
		adev->df.funcs = &df_v4_6_2_funcs;
		break;
	default:
		break;
	}

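	/* set SMUIO version */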
	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 3):
		adev->smuio.funcs = &smuio_v13_0_3_funcs;
		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
			adev->flags |= AMD_IS_APU;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(14, 0, 0):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	default:
		break;
	}

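	/* set LSDMA version */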
	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	default:
		break;
	}

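	/* register the per-IP blocks; the order here determines init order */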
	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	     !amdgpu_sriov_vf(adev)) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
	if (r)
		return r;

	return 0;
}