/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
#include "amdgpu_ras.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_3.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "df_v4_6_2.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "nbio_v7_9.h"
#include "nbio_v7_11.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gmc_v12_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "nbif_v6_3_1.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "hdp_v7_0.h"
#include "nv.h"
#include "soc21.h"
#include "soc24.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
#include "ih_v7_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "gfx_v12_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "sdma_v7_0.h"
#include "lsdma_v6_0.h"
#include "lsdma_v7_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "vcn_v4_0_3.h"
#include "jpeg_v4_0_3.h"
#include "vcn_v4_0_5.h"
#include "jpeg_v4_0_5.h"
#include "amdgpu_vkms.h"
#include "mes_v11_0.h"
#include "mes_v12_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
#include "smuio_v14_0_2.h"
#include "vcn_v5_0_0.h"
#include "jpeg_v5_0_0.h"

#include "amdgpu_vpe.h"

#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);

#define mmIP_DISCOVERY_VERSION  0x16A00
#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMP0_SMN_C2PMSG_33	0x16061
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID]		= "MP1",
	[MP2_HWID]		= "MP2",
	[THM_HWID]		= "THM",
	[SMUIO_HWID]		= "SMUIO",
	[FUSE_HWID]		= "FUSE",
	[CLKA_HWID]		= "CLKA",
	[PWR_HWID]		= "PWR",
	[GC_HWID]		= "GC",
	[UVD_HWID]		= "UVD",
	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
	[ACP_HWID]		= "ACP",
	[DCI_HWID]		= "DCI",
	[DMU_HWID]		= "DMU",
	[DCO_HWID]		= "DCO",
	[DIO_HWID]		= "DIO",
	[XDMA_HWID]		= "XDMA",
	[DCEAZ_HWID]		= "DCEAZ",
	[DAZ_HWID]		= "DAZ",
	[SDPMUX_HWID]		= "SDPMUX",
	[NTB_HWID]		= "NTB",
	[IOHC_HWID]		= "IOHC",
	[L2IMU_HWID]		= "L2IMU",
	[VCE_HWID]		= "VCE",
	[MMHUB_HWID]		= "MMHUB",
	[ATHUB_HWID]		= "ATHUB",
	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
	[DFX_HWID]		= "DFX",
	[DBGU0_HWID]		= "DBGU0",
	[DBGU1_HWID]		= "DBGU1",
	[OSSSYS_HWID]		= "OSSSYS",
	[HDP_HWID]		= "HDP",
	[SDMA0_HWID]		= "SDMA0",
	[SDMA1_HWID]		= "SDMA1",
	[SDMA2_HWID]		= "SDMA2",
	[SDMA3_HWID]		= "SDMA3",
	[LSDMA_HWID]		= "LSDMA",
	[ISP_HWID]		= "ISP",
	[DBGU_IO_HWID]		= "DBGU_IO",
	[DF_HWID]		= "DF",
	[CLKB_HWID]		= "CLKB",
	[FCH_HWID]		= "FCH",
	[DFX_DAP_HWID]		= "DFX_DAP",
	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
	[L1IMU3_HWID]		= "L1IMU3",
	[L1IMU4_HWID]		= "L1IMU4",
	[L1IMU5_HWID]		= "L1IMU5",
	[L1IMU6_HWID]		= "L1IMU6",
	[L1IMU7_HWID]		= "L1IMU7",
	[L1IMU8_HWID]		= "L1IMU8",
	[L1IMU9_HWID]		= "L1IMU9",
	[L1IMU10_HWID]		= "L1IMU10",
	[L1IMU11_HWID]		= "L1IMU11",
	[L1IMU12_HWID]		= "L1IMU12",
	[L1IMU13_HWID]		= "L1IMU13",
	[L1IMU14_HWID]		= "L1IMU14",
	[L1IMU15_HWID]		= "L1IMU15",
	[WAFLC_HWID]		= "WAFLC",
	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
	[PCIE_HWID]		= "PCIE",
	[PCS_HWID]		= "PCS",
	[DDCL_HWID]		= "DDCL",
	[SST_HWID]		= "SST",
	[IOAGR_HWID]		= "IOAGR",
	[NBIF_HWID]		= "NBIF",
	[IOAPIC_HWID]		= "IOAPIC",
	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
	[NTBCCP_HWID]		= "NTBCCP",
	[UMC_HWID]		= "UMC",
	[SATA_HWID]		= "SATA",
	[USB_HWID]		= "USB",
	[CCXSEC_HWID]		= "CCXSEC",
	[XGMI_HWID]		= "XGMI",
	[XGBE_HWID]		= "XGBE",
	[MP0_HWID]		= "MP0",
	[VPE_HWID]		= "VPE",
};

static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP]	= GC_HWID,
	[HDP_HWIP]	= HDP_HWID,
	[SDMA0_HWIP]	= SDMA0_HWID,
	[SDMA1_HWIP]	= SDMA1_HWID,
	[SDMA2_HWIP]    = SDMA2_HWID,
	[SDMA3_HWIP]    = SDMA3_HWID,
	[LSDMA_HWIP]    = LSDMA_HWID,
	[MMHUB_HWIP]	= MMHUB_HWID,
	[ATHUB_HWIP]	= ATHUB_HWID,
	[NBIO_HWIP]	= NBIF_HWID,
	[MP0_HWIP]	= MP0_HWID,
	[MP1_HWIP]	= MP1_HWID,
	[UVD_HWIP]	= UVD_HWID,
	[VCE_HWIP]	= VCE_HWID,
	[DF_HWIP]	= DF_HWID,
	[DCE_HWIP]	= DMU_HWID,
	[OSSSYS_HWIP]	= OSSSYS_HWID,
	[SMUIO_HWIP]	= SMUIO_HWID,
	[PWR_HWIP]	= PWR_HWID,
	[NBIF_HWIP]	= NBIF_HWID,
	[THM_HWIP]	= THM_HWID,
	[CLK_HWIP]	= CLKA_HWID,
	[UMC_HWIP]	= UMC_HWID,
	[XGMI_HWIP]	= XGMI_HWID,
	[DCI_HWIP]	= DCI_HWID,
	[PCIE_HWIP]	= PCIE_HWID,
	[VPE_HWIP]	= VPE_HWID,
};

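/*
 * When there is no discovery carve-out in VRAM (e.g. APUs), the binary
 * lives in a reserved TMR region in system memory instead; locate it via
 * ACPI and copy it out.  The discovery data sits DISCOVERY_TMR_OFFSET
 * bytes before the end of the TMR.
 */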
static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
{
	u64 tmr_offset, tmr_size, pos;
	void *discv_regn;
	int ret;

	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
	if (ret)
		return ret;

	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;

	/* This region is read-only and reserved from system use */
	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
	if (discv_regn) {
		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
		memunmap(discv_regn);
		return 0;
	}

	return -ENOENT;
}

#define IP_DISCOVERY_V2		2
#define IP_DISCOVERY_V4		4

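/*
 * Read the discovery binary from the end of VRAM, waiting for IFWI init to
 * complete first on bare metal.  If no VRAM size is reported, fall back to
 * the system-memory TMR path above.
 */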
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
{
	uint64_t vram_size;
	u32 msg;
	int i, ret = 0;

	if (!amdgpu_sriov_vf(adev)) {
		/* It can take up to a second for IFWI init to complete on some dGPUs,
		 * but generally it should be in the 60-100ms range.  Normally this starts
		 * as soon as the device gets power so by the time the OS loads this has long
		 * completed.  However, when a card is hotplugged via e.g., USB4, we need to
		 * wait for this to complete.  Once the C2PMSG is updated, we can
		 * continue.
		 */

		for (i = 0; i < 1000; i++) {
			msg = RREG32(mmMP0_SMN_C2PMSG_33);
			if (msg & 0x80000000)
				break;
			usleep_range(1000, 1100);
		}
	}

	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;

	if (vram_size) {
		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
					  adev->mman.discovery_tmr_size, false);
	} else {
		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
	}

	return ret;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
{
	const struct firmware *fw;
	const char *fw_name;
	int r;

	switch (amdgpu_discovery) {
	case 2:
		fw_name = FIRMWARE_IP_DISCOVERY;
		break;
	default:
		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
		return -EINVAL;
	}

	r = request_firmware(&fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
	release_firmware(fw);

	return 0;
}

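/* Discovery tables are protected by a simple 16-bit byte-sum checksum. */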
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;
	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table of VCN config.
	 */
	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

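/*
 * Sanity-check the NPS (memory partition) info table: verify its table id
 * and the checksum over the table body.
 */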
static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
					   struct binary_header *bhdr)
{
	struct table_info *info;
	uint16_t checksum;
	uint16_t offset;

	info = &bhdr->table_list[NPS_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	struct nps_info_header *nhdr =
		(struct nps_info_header *)(adev->mman.discovery_bin + offset);

	if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
		dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
		return -EINVAL;
	}

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      le32_to_cpu(nhdr->size_bytes),
					      checksum)) {
		dev_dbg(adev->dev, "invalid nps info data table checksum\n");
		return -EINVAL;
	}

	return 0;
}

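/*
 * Fetch the discovery binary, either from a firmware file (amdgpu_discovery=2)
 * or from the device itself, and validate the binary signature plus the
 * checksum of each table before anything else parses the data.
 */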
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
	if (!adev->mman.discovery_bin)
		return -ENOMEM;

	/* Read from file if it is the preferred option */
	if (amdgpu_discovery == 2) {
		dev_info(adev->dev, "use ip discovery information from file");
		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);

		if (r) {
			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
			r = -EINVAL;
			goto out;
		}

	} else {
		r = amdgpu_discovery_read_binary_from_mem(
			adev, adev->mman.discovery_bin);
		if (r)
			goto out;
	}

	/* check the ip discovery binary signature */
	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
		dev_err(adev->dev,
			"get invalid ip discovery binary signature\n");
		r = -EINVAL;
		goto out;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      size, checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le16_to_cpu(ihdr->size), checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(ghdr->size), checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
	if ((amdgpu_discovery != 2) &&
	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
		amdgpu_ras_query_boot_status(adev, 4);
	return r;
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}

static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
{
	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
			  ip->instance_number);
		return -EINVAL;
	}
	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
			  le16_to_cpu(ip->hw_id));
		return -EINVAL;
	}

	return 0;
}

static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						uint32_t *vcn_harvest_count)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip_v4 *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	int i, j;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			if (le16_to_cpu(ip->variant) == 1) {
				switch (le16_to_cpu(ip->hw_id)) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (ip->instance_number == 0) {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
					} else {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
					}
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}
}

static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;
	uint32_t umc_harvest_config = 0;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			adev->vcn.harvest_config |=
				(1 << harvest_info->list[i].number_instance);
			adev->jpeg.harvest_config |=
				(1 << harvest_info->list[i].number_instance);

			adev->vcn.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			adev->jpeg.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			umc_harvest_config |=
				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
			(*umc_harvest_count)++;
			break;
		case GC_HWID:
			adev->gfx.xcc_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case SDMA0_HWID:
			adev->sdma.sdma_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		default:
			break;
		}
	}

	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
				~umc_harvest_config;
}

/* ================================================== */

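/* The objects below expose the parsed IP discovery data through sysfs, in a
 * hierarchy of roughly this shape (illustrative):
 *
 *   ip_discovery/die/<die>/<hw_id>/<instance>/{hw_id,major,minor,revision,...}
 *
 * with a symlink from the human-readable HW ID name (e.g. "GC") to each
 * numeric <hw_id> directory.
 */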
struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8  num_instance;
	u8  major, minor, revision;
	u8  harvest;

	int num_base_addresses;
	u32 base_addr[] __counted_by(num_base_addresses);
};

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that at + size <= PAGE_SIZE;
		 * each "0x%08X\n" entry needs at most 12 bytes.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static const struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static const struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static const struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static const struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static const struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	adev->ip_top = NULL;
	kfree(ip_top);
}

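/*
 * Derive the harvest status of one IP instance from the instance masks
 * computed while parsing the harvest information.
 */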
static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
{
	uint8_t harvest = 0;

	/* Until a uniform way is figured, get mask based on hwid */
	switch (hw_id) {
	case VCN_HWID:
		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
		break;
	case DMU_HWID:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
			harvest = 0x1;
		break;
	case UMC_HWID:
		/* TODO: It needs another parsing; for now, ignore.*/
		break;
	case GC_HWID:
		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
		break;
	case SDMA0_HWID:
		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
		break;
	default:
		break;
	}

	return harvest;
}

static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
{
	int ii, jj, kk, res;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip_v4 *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
			if (amdgpu_discovery_validate_ip(ip) ||
			    le16_to_cpu(ip->hw_id) != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->instance_number;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest =
				amdgpu_discovery_get_harvest_info(
					adev, ip_hw_instance->hw_id,
					ip_hw_instance->num_instance);
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
				if (reg_base_64)
					ip_hw_instance->base_addr[kk] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
				else
					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
			}

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			if (reg_base_64)
				ip_offset += struct_size(ip, base_address_64,
							 ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address,
							 ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &adev->ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
	}

	return 0;
}

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	struct kset *die_kset;
	int res, ii;

	if (!adev->mman.discovery_bin)
		return -EINVAL;

	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;

	adev->ip_top->adev = adev;

	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &adev->ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &adev->ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&adev->ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&adev->ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &adev->ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&adev->ip_top->die_kset.kobj);
	kobject_put(&adev->ip_top->kobj);
}

/* ================================================== */

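/*
 * Walk every die and IP instance in the discovery binary, record the
 * instance masks and counts, and populate adev->reg_offset[] and
 * adev->ip_versions[] for the rest of the driver.
 */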
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	uint8_t num_base_address, subrev, variant;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip_v4 *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_discovery_init failed\n");
		return r;
	}

	adev->gfx.xcc_mask = 0;
	adev->sdma.sdma_mask = 0;
	adev->vcn.inst_mask = 0;
	adev->jpeg.inst_mask = 0;
	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
					le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
				le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->instance_number,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				if (adev->vcn.num_vcn_inst <
				    AMDGPU_MAX_VCN_INSTANCES) {
					adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
						ip->revision & 0xc0;
					adev->vcn.num_vcn_inst++;
					adev->vcn.inst_mask |=
						(1U << ip->instance_number);
					adev->jpeg.inst_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
				}
				ip->revision &= ~0xc0;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances <
				    AMDGPU_MAX_SDMA_INSTANCES) {
					adev->sdma.num_instances++;
					adev->sdma.sdma_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
				}
			}

			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
					adev->vpe.num_instances++;
				else
					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
						adev->vpe.num_instances + 1,
						AMDGPU_MAX_VPE_INSTANCES);
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
				adev->gmc.num_umc++;
				adev->umc.node_inst_num++;
			}

			if (le16_to_cpu(ip->hw_id) == GC_HWID)
				adev->gfx.xcc_mask |=
					(1U << ip->instance_number);

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				if (ihdr->base_addr_64_bit)
					/* Truncate the 64bit base address from ip discovery
					 * and only store lower 32bit ip base in reg_offset[].
					 * Bits > 32 follows ASIC specific format, thus just
					 * discard them and handle it within specific ASIC.
					 * By this way reg_offset[] and related helpers can
					 * stay unchanged.
					 * The base address is in dwords, thus clear the
					 * highest 2 bits to store.
					 */
					ip->base_address[k] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
				else
					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
				    hw_id_map[hw_ip] != 0) {
					DRM_DEBUG("set register base offset for %s\n",
							hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->instance_number] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example.  Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
					 * example.  On most chips there are multiple instances
					 * with the same HWID.
					 */

					if (ihdr->version < 3) {
						subrev = 0;
						variant = 0;
					} else {
						subrev = ip->sub_revision;
						variant = ip->variant;
					}

					adev->ip_versions[hw_ip]
							 [ip->instance_number] =
						IP_VERSION_FULL(ip->major,
								ip->minor,
								ip->revision,
								variant,
								subrev);
				}
			}

next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return 0;
}

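/* Apply the harvest configuration, from whichever source this ASIC provides. */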
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;

	/*
	 * Harvest table does not fit Navi1x and legacy GPUs,
	 * so read harvest bit per IP data structure to set
	 * harvest configuration.
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4)) {
		if ((adev->pdev->device == 0x731E &&
			(adev->pdev->revision == 0xC6 ||
			 adev->pdev->revision == 0xC7)) ||
			(adev->pdev->device == 0x7340 &&
			 adev->pdev->revision == 0xC9) ||
			(adev->pdev->device == 0x7360 &&
			 adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
				&vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc) {
		adev->gmc.num_umc -= umc_harvest_count;
	}
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v2_0 v2;
	struct gc_info_v2_1 v2_1;
};

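/* Fill adev->gfx.config and adev->gfx.cu_info from the GC info table,
 * per table version.
 */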
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union mall_info {
	struct mall_info_v1_0 v1;
	struct mall_info_v2_0 v2;
};

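/*
 * Compute the total MALL (last level cache) size.  For v1 tables this is
 * accumulated per UMC instance: doubled when the m_s_present bit for that
 * instance is set, halved when the m_half_use bit is set.  For v2 tables it
 * is simply mall_size_per_umc * num_umc.
 */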
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		adev->gmc.m_half_use = half_use;
		break;
	case 2:
		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union vcn_info {
	struct vcn_info_v1_0 v1;
};

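/* Record the per-instance VCN en/decode disable fuses from the VCN info table. */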
1650 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1651 {
1652 	struct binary_header *bhdr;
1653 	union vcn_info *vcn_info;
1654 	u16 offset;
1655 	int v;
1656 
1657 	if (!adev->mman.discovery_bin) {
1658 		DRM_ERROR("ip discovery uninitialized\n");
1659 		return -EINVAL;
1660 	}
1661 
1662 	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1663 	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1664 	 * but that may change in the future with new GPUs so keep this
1665 	 * check for defensive purposes.
1666 	 */
1667 	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1668 		dev_err(adev->dev, "invalid vcn instances\n");
1669 		return -EINVAL;
1670 	}
1671 
1672 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1673 	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1674 
1675 	if (!offset)
1676 		return 0;
1677 
1678 	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1679 
1680 	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1681 	case 1:
1682 		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1683 		 * so this won't overflow.
1684 		 */
1685 		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1686 			adev->vcn.vcn_codec_disable_mask[v] =
1687 				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1688 		}
1689 		break;
1690 	default:
1691 		dev_err(adev->dev,
1692 			"Unhandled VCN info table %d.%d\n",
1693 			le16_to_cpu(vcn_info->v1.header.version_major),
1694 			le16_to_cpu(vcn_info->v1.header.version_minor));
1695 		return -EINVAL;
1696 	}
1697 	return 0;
1698 }
1699 
1700 union nps_info {
1701 	struct nps_info_v1_0 v1;
1702 };
1703 
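/*
 * Parse the NPS info table and report the NPS mode together with the
 * memory ranges discovered for it. On success the caller owns the
 * returned range array and must free it with kvfree().
 */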
1704 int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
1705 				  uint32_t *nps_type,
1706 				  struct amdgpu_gmc_memrange **ranges,
1707 				  int *range_cnt)
1708 {
1709 	struct amdgpu_gmc_memrange *mem_ranges;
1710 	struct binary_header *bhdr;
1711 	union nps_info *nps_info;
1712 	u16 offset;
1713 	int i;
1714 
1715 	if (!nps_type || !range_cnt || !ranges)
1716 		return -EINVAL;
1717 
1718 	if (!adev->mman.discovery_bin) {
1719 		dev_err(adev->dev,
1720 			"fetch mem range failed, ip discovery uninitialized\n");
1721 		return -EINVAL;
1722 	}
1723 
1724 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1725 	offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
1726 
1727 	if (!offset)
1728 		return -ENOENT;
1729 
1730 	/* If verification fails, return as if NPS table doesn't exist */
1731 	if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
1732 		return -ENOENT;
1733 
1734 	nps_info = (union nps_info *)(adev->mman.discovery_bin + offset);
1735 
1736 	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
1737 	case 1:
1738 		*nps_type = nps_info->v1.nps_type;
1739 		*range_cnt = nps_info->v1.count;
		mem_ranges = kvcalloc(*range_cnt,
				      sizeof(struct amdgpu_gmc_memrange),
				      GFP_KERNEL);
		if (!mem_ranges)
			return -ENOMEM;
1743 		for (i = 0; i < *range_cnt; i++) {
1744 			mem_ranges[i].base_address =
1745 				nps_info->v1.instance_info[i].base_address;
1746 			mem_ranges[i].limit_address =
1747 				nps_info->v1.instance_info[i].limit_address;
1748 			mem_ranges[i].nid_mask = -1;
1749 			mem_ranges[i].flags = 0;
1750 		}
1751 		*ranges = mem_ranges;
1752 		break;
1753 	default:
1754 		dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
1755 			le16_to_cpu(nps_info->v1.header.version_major),
1756 			le16_to_cpu(nps_info->v1.header.version_minor));
1757 		return -EINVAL;
1758 	}
1759 
1760 	return 0;
1761 }
1762 
1763 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1764 {
	/* there is no dedicated SOC IP entry, so key it off the GC version */
1766 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1767 	case IP_VERSION(9, 0, 1):
1768 	case IP_VERSION(9, 1, 0):
1769 	case IP_VERSION(9, 2, 1):
1770 	case IP_VERSION(9, 2, 2):
1771 	case IP_VERSION(9, 3, 0):
1772 	case IP_VERSION(9, 4, 0):
1773 	case IP_VERSION(9, 4, 1):
1774 	case IP_VERSION(9, 4, 2):
1775 	case IP_VERSION(9, 4, 3):
1776 	case IP_VERSION(9, 4, 4):
1777 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1778 		break;
1779 	case IP_VERSION(10, 1, 10):
1780 	case IP_VERSION(10, 1, 1):
1781 	case IP_VERSION(10, 1, 2):
1782 	case IP_VERSION(10, 1, 3):
1783 	case IP_VERSION(10, 1, 4):
1784 	case IP_VERSION(10, 3, 0):
1785 	case IP_VERSION(10, 3, 1):
1786 	case IP_VERSION(10, 3, 2):
1787 	case IP_VERSION(10, 3, 3):
1788 	case IP_VERSION(10, 3, 4):
1789 	case IP_VERSION(10, 3, 5):
1790 	case IP_VERSION(10, 3, 6):
1791 	case IP_VERSION(10, 3, 7):
1792 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1793 		break;
1794 	case IP_VERSION(11, 0, 0):
1795 	case IP_VERSION(11, 0, 1):
1796 	case IP_VERSION(11, 0, 2):
1797 	case IP_VERSION(11, 0, 3):
1798 	case IP_VERSION(11, 0, 4):
1799 	case IP_VERSION(11, 5, 0):
1800 	case IP_VERSION(11, 5, 1):
1801 		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1802 		break;
1803 	case IP_VERSION(12, 0, 0):
1804 	case IP_VERSION(12, 0, 1):
1805 		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
1806 		break;
1807 	default:
1808 		dev_err(adev->dev,
1809 			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1810 			amdgpu_ip_version(adev, GC_HWIP, 0));
1811 		return -EINVAL;
1812 	}
1813 	return 0;
1814 }
1815 
1816 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1817 {
1818 	/* use GC or MMHUB IP version */
1819 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1820 	case IP_VERSION(9, 0, 1):
1821 	case IP_VERSION(9, 1, 0):
1822 	case IP_VERSION(9, 2, 1):
1823 	case IP_VERSION(9, 2, 2):
1824 	case IP_VERSION(9, 3, 0):
1825 	case IP_VERSION(9, 4, 0):
1826 	case IP_VERSION(9, 4, 1):
1827 	case IP_VERSION(9, 4, 2):
1828 	case IP_VERSION(9, 4, 3):
1829 	case IP_VERSION(9, 4, 4):
1830 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1831 		break;
1832 	case IP_VERSION(10, 1, 10):
1833 	case IP_VERSION(10, 1, 1):
1834 	case IP_VERSION(10, 1, 2):
1835 	case IP_VERSION(10, 1, 3):
1836 	case IP_VERSION(10, 1, 4):
1837 	case IP_VERSION(10, 3, 0):
1838 	case IP_VERSION(10, 3, 1):
1839 	case IP_VERSION(10, 3, 2):
1840 	case IP_VERSION(10, 3, 3):
1841 	case IP_VERSION(10, 3, 4):
1842 	case IP_VERSION(10, 3, 5):
1843 	case IP_VERSION(10, 3, 6):
1844 	case IP_VERSION(10, 3, 7):
1845 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1846 		break;
1847 	case IP_VERSION(11, 0, 0):
1848 	case IP_VERSION(11, 0, 1):
1849 	case IP_VERSION(11, 0, 2):
1850 	case IP_VERSION(11, 0, 3):
1851 	case IP_VERSION(11, 0, 4):
1852 	case IP_VERSION(11, 5, 0):
1853 	case IP_VERSION(11, 5, 1):
1854 		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1855 		break;
1856 	case IP_VERSION(12, 0, 0):
1857 	case IP_VERSION(12, 0, 1):
1858 		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
1859 		break;
1860 	default:
1861 		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1862 			amdgpu_ip_version(adev, GC_HWIP, 0));
1863 		return -EINVAL;
1864 	}
1865 	return 0;
1866 }
1867 
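/* the interrupt handler (IH) block is keyed off the OSSSYS IP version */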
1868 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1869 {
1870 	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
1871 	case IP_VERSION(4, 0, 0):
1872 	case IP_VERSION(4, 0, 1):
1873 	case IP_VERSION(4, 1, 0):
1874 	case IP_VERSION(4, 1, 1):
1875 	case IP_VERSION(4, 3, 0):
1876 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1877 		break;
1878 	case IP_VERSION(4, 2, 0):
1879 	case IP_VERSION(4, 2, 1):
1880 	case IP_VERSION(4, 4, 0):
1881 	case IP_VERSION(4, 4, 2):
1882 	case IP_VERSION(4, 4, 5):
1883 		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1884 		break;
1885 	case IP_VERSION(5, 0, 0):
1886 	case IP_VERSION(5, 0, 1):
1887 	case IP_VERSION(5, 0, 2):
1888 	case IP_VERSION(5, 0, 3):
1889 	case IP_VERSION(5, 2, 0):
1890 	case IP_VERSION(5, 2, 1):
1891 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1892 		break;
1893 	case IP_VERSION(6, 0, 0):
1894 	case IP_VERSION(6, 0, 1):
1895 	case IP_VERSION(6, 0, 2):
1896 		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1897 		break;
1898 	case IP_VERSION(6, 1, 0):
1899 		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1900 		break;
1901 	case IP_VERSION(7, 0, 0):
1902 		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
1903 		break;
1904 	default:
1905 		dev_err(adev->dev,
1906 			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1907 			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
1908 		return -EINVAL;
1909 	}
1910 	return 0;
1911 }
1912 
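/* PSP (platform security processor) blocks are keyed off the MP0 IP version */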
1913 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1914 {
1915 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1916 	case IP_VERSION(9, 0, 0):
1917 		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1918 		break;
1919 	case IP_VERSION(10, 0, 0):
1920 	case IP_VERSION(10, 0, 1):
1921 		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1922 		break;
1923 	case IP_VERSION(11, 0, 0):
1924 	case IP_VERSION(11, 0, 2):
1925 	case IP_VERSION(11, 0, 4):
1926 	case IP_VERSION(11, 0, 5):
1927 	case IP_VERSION(11, 0, 9):
1928 	case IP_VERSION(11, 0, 7):
1929 	case IP_VERSION(11, 0, 11):
1930 	case IP_VERSION(11, 0, 12):
1931 	case IP_VERSION(11, 0, 13):
1932 	case IP_VERSION(11, 5, 0):
1933 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1934 		break;
1935 	case IP_VERSION(11, 0, 8):
1936 		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
1937 		break;
1938 	case IP_VERSION(11, 0, 3):
1939 	case IP_VERSION(12, 0, 1):
1940 		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
1941 		break;
1942 	case IP_VERSION(13, 0, 0):
1943 	case IP_VERSION(13, 0, 1):
1944 	case IP_VERSION(13, 0, 2):
1945 	case IP_VERSION(13, 0, 3):
1946 	case IP_VERSION(13, 0, 5):
1947 	case IP_VERSION(13, 0, 6):
1948 	case IP_VERSION(13, 0, 7):
1949 	case IP_VERSION(13, 0, 8):
1950 	case IP_VERSION(13, 0, 10):
1951 	case IP_VERSION(13, 0, 11):
1952 	case IP_VERSION(13, 0, 14):
1953 	case IP_VERSION(14, 0, 0):
1954 	case IP_VERSION(14, 0, 1):
1955 		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
1956 		break;
1957 	case IP_VERSION(13, 0, 4):
1958 		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
1959 		break;
1960 	case IP_VERSION(14, 0, 2):
1961 	case IP_VERSION(14, 0, 3):
1962 		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
1963 		break;
1964 	default:
1965 		dev_err(adev->dev,
1966 			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
1967 			amdgpu_ip_version(adev, MP0_HWIP, 0));
1968 		return -EINVAL;
1969 	}
1970 	return 0;
1971 }
1972 
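/* SMU power management blocks are keyed off the MP1 IP version */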
1973 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
1974 {
1975 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1976 	case IP_VERSION(9, 0, 0):
1977 	case IP_VERSION(10, 0, 0):
1978 	case IP_VERSION(10, 0, 1):
1979 	case IP_VERSION(11, 0, 2):
1980 		if (adev->asic_type == CHIP_ARCTURUS)
1981 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1982 		else
1983 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1984 		break;
1985 	case IP_VERSION(11, 0, 0):
1986 	case IP_VERSION(11, 0, 5):
1987 	case IP_VERSION(11, 0, 9):
1988 	case IP_VERSION(11, 0, 7):
1989 	case IP_VERSION(11, 0, 8):
1990 	case IP_VERSION(11, 0, 11):
1991 	case IP_VERSION(11, 0, 12):
1992 	case IP_VERSION(11, 0, 13):
1993 	case IP_VERSION(11, 5, 0):
1994 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1995 		break;
1996 	case IP_VERSION(12, 0, 0):
1997 	case IP_VERSION(12, 0, 1):
1998 		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
1999 		break;
2000 	case IP_VERSION(13, 0, 0):
2001 	case IP_VERSION(13, 0, 1):
2002 	case IP_VERSION(13, 0, 2):
2003 	case IP_VERSION(13, 0, 3):
2004 	case IP_VERSION(13, 0, 4):
2005 	case IP_VERSION(13, 0, 5):
2006 	case IP_VERSION(13, 0, 6):
2007 	case IP_VERSION(13, 0, 7):
2008 	case IP_VERSION(13, 0, 8):
2009 	case IP_VERSION(13, 0, 10):
2010 	case IP_VERSION(13, 0, 11):
2011 	case IP_VERSION(13, 0, 14):
2012 		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
2013 		break;
2014 	case IP_VERSION(14, 0, 0):
2015 	case IP_VERSION(14, 0, 1):
2016 	case IP_VERSION(14, 0, 2):
2017 	case IP_VERSION(14, 0, 3):
2018 		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
2019 		break;
2020 	default:
2021 		dev_err(adev->dev,
2022 			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
2023 			amdgpu_ip_version(adev, MP1_HWIP, 0));
2024 		return -EINVAL;
2025 	}
2026 	return 0;
2027 }
2028 
2029 #if defined(CONFIG_DRM_AMD_DC)
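/* SR-IOV guests get a virtual display backed by the VKMS IP block */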
2030 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
2031 {
2032 	amdgpu_device_set_sriov_virtual_display(adev);
2033 	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2034 }
2035 #endif
2036 
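/*
 * Virtual displays take priority; otherwise the DC-based DM block is
 * registered when display core support is compiled in and the DCE/DCI
 * IP version is recognized.
 */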
2037 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
2038 {
2039 	if (adev->enable_virtual_display) {
2040 		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2041 		return 0;
2042 	}
2043 
2044 	if (!amdgpu_device_has_dc_support(adev))
2045 		return 0;
2046 
2047 #if defined(CONFIG_DRM_AMD_DC)
2048 	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2049 		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2050 		case IP_VERSION(1, 0, 0):
2051 		case IP_VERSION(1, 0, 1):
2052 		case IP_VERSION(2, 0, 2):
2053 		case IP_VERSION(2, 0, 0):
2054 		case IP_VERSION(2, 0, 3):
2055 		case IP_VERSION(2, 1, 0):
2056 		case IP_VERSION(3, 0, 0):
2057 		case IP_VERSION(3, 0, 2):
2058 		case IP_VERSION(3, 0, 3):
2059 		case IP_VERSION(3, 0, 1):
2060 		case IP_VERSION(3, 1, 2):
2061 		case IP_VERSION(3, 1, 3):
2062 		case IP_VERSION(3, 1, 4):
2063 		case IP_VERSION(3, 1, 5):
2064 		case IP_VERSION(3, 1, 6):
2065 		case IP_VERSION(3, 2, 0):
2066 		case IP_VERSION(3, 2, 1):
2067 		case IP_VERSION(3, 5, 0):
2068 		case IP_VERSION(3, 5, 1):
2069 		case IP_VERSION(4, 1, 0):
2070 			/* TODO: Fix IP version. DC code expects version 4.0.1 */
2071 			if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
2072 				adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);
2073 
2074 			if (amdgpu_sriov_vf(adev))
2075 				amdgpu_discovery_set_sriov_display(adev);
2076 			else
2077 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2078 			break;
2079 		default:
2080 			dev_err(adev->dev,
2081 				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
2082 				amdgpu_ip_version(adev, DCE_HWIP, 0));
2083 			return -EINVAL;
2084 		}
2085 	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2086 		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2087 		case IP_VERSION(12, 0, 0):
2088 		case IP_VERSION(12, 0, 1):
2089 		case IP_VERSION(12, 1, 0):
2090 			if (amdgpu_sriov_vf(adev))
2091 				amdgpu_discovery_set_sriov_display(adev);
2092 			else
2093 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2094 			break;
2095 		default:
2096 			dev_err(adev->dev,
2097 				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
2098 				amdgpu_ip_version(adev, DCI_HWIP, 0));
2099 			return -EINVAL;
2100 		}
2101 	}
2102 #endif
2103 	return 0;
2104 }
2105 
2106 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
2107 {
2108 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2109 	case IP_VERSION(9, 0, 1):
2110 	case IP_VERSION(9, 1, 0):
2111 	case IP_VERSION(9, 2, 1):
2112 	case IP_VERSION(9, 2, 2):
2113 	case IP_VERSION(9, 3, 0):
2114 	case IP_VERSION(9, 4, 0):
2115 	case IP_VERSION(9, 4, 1):
2116 	case IP_VERSION(9, 4, 2):
2117 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
2118 		break;
2119 	case IP_VERSION(9, 4, 3):
2120 	case IP_VERSION(9, 4, 4):
2121 		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
2122 		break;
2123 	case IP_VERSION(10, 1, 10):
2124 	case IP_VERSION(10, 1, 2):
2125 	case IP_VERSION(10, 1, 1):
2126 	case IP_VERSION(10, 1, 3):
2127 	case IP_VERSION(10, 1, 4):
2128 	case IP_VERSION(10, 3, 0):
2129 	case IP_VERSION(10, 3, 2):
2130 	case IP_VERSION(10, 3, 1):
2131 	case IP_VERSION(10, 3, 4):
2132 	case IP_VERSION(10, 3, 5):
2133 	case IP_VERSION(10, 3, 6):
2134 	case IP_VERSION(10, 3, 3):
2135 	case IP_VERSION(10, 3, 7):
2136 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
2137 		break;
2138 	case IP_VERSION(11, 0, 0):
2139 	case IP_VERSION(11, 0, 1):
2140 	case IP_VERSION(11, 0, 2):
2141 	case IP_VERSION(11, 0, 3):
2142 	case IP_VERSION(11, 0, 4):
2143 	case IP_VERSION(11, 5, 0):
2144 	case IP_VERSION(11, 5, 1):
2145 		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
2146 		break;
2147 	case IP_VERSION(12, 0, 0):
2148 	case IP_VERSION(12, 0, 1):
2149 		if (!amdgpu_exp_hw_support)
2150 			return -EINVAL;
2151 		amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
2152 		break;
2153 	default:
2154 		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
2155 			amdgpu_ip_version(adev, GC_HWIP, 0));
2156 		return -EINVAL;
2157 	}
2158 	return 0;
2159 }
2160 
2161 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
2162 {
2163 	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
2164 	case IP_VERSION(4, 0, 0):
2165 	case IP_VERSION(4, 0, 1):
2166 	case IP_VERSION(4, 1, 0):
2167 	case IP_VERSION(4, 1, 1):
2168 	case IP_VERSION(4, 1, 2):
2169 	case IP_VERSION(4, 2, 0):
2170 	case IP_VERSION(4, 2, 2):
2171 	case IP_VERSION(4, 4, 0):
2172 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
2173 		break;
2174 	case IP_VERSION(4, 4, 2):
2175 	case IP_VERSION(4, 4, 5):
2176 		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
2177 		break;
2178 	case IP_VERSION(5, 0, 0):
2179 	case IP_VERSION(5, 0, 1):
2180 	case IP_VERSION(5, 0, 2):
2181 	case IP_VERSION(5, 0, 5):
2182 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2183 		break;
2184 	case IP_VERSION(5, 2, 0):
2185 	case IP_VERSION(5, 2, 2):
2186 	case IP_VERSION(5, 2, 4):
2187 	case IP_VERSION(5, 2, 5):
2188 	case IP_VERSION(5, 2, 6):
2189 	case IP_VERSION(5, 2, 3):
2190 	case IP_VERSION(5, 2, 1):
2191 	case IP_VERSION(5, 2, 7):
2192 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2193 		break;
2194 	case IP_VERSION(6, 0, 0):
2195 	case IP_VERSION(6, 0, 1):
2196 	case IP_VERSION(6, 0, 2):
2197 	case IP_VERSION(6, 0, 3):
2198 	case IP_VERSION(6, 1, 0):
2199 	case IP_VERSION(6, 1, 1):
2200 		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2201 		break;
2202 	case IP_VERSION(7, 0, 0):
2203 	case IP_VERSION(7, 0, 1):
2204 		amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
2205 		break;
2206 	default:
2207 		dev_err(adev->dev,
2208 			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2209 			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2210 		return -EINVAL;
2211 	}
2212 	return 0;
2213 }
2214 
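/*
 * ASICs that expose a VCE IP use the legacy UVD/VCE pairing; everything
 * else uses VCN, with JPEG registered separately where applicable.
 */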
2215 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2216 {
2217 	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2218 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2219 		case IP_VERSION(7, 0, 0):
2220 		case IP_VERSION(7, 2, 0):
2221 			/* UVD is not supported on vega20 SR-IOV */
2222 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2223 				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2224 			break;
2225 		default:
2226 			dev_err(adev->dev,
2227 				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2228 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2229 			return -EINVAL;
2230 		}
2231 		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2232 		case IP_VERSION(4, 0, 0):
2233 		case IP_VERSION(4, 1, 0):
2234 			/* VCE is not supported on vega20 SR-IOV */
2235 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2236 				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2237 			break;
2238 		default:
2239 			dev_err(adev->dev,
2240 				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2241 				amdgpu_ip_version(adev, VCE_HWIP, 0));
2242 			return -EINVAL;
2243 		}
2244 	} else {
2245 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2246 		case IP_VERSION(1, 0, 0):
2247 		case IP_VERSION(1, 0, 1):
2248 			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2249 			break;
2250 		case IP_VERSION(2, 0, 0):
2251 		case IP_VERSION(2, 0, 2):
2252 		case IP_VERSION(2, 2, 0):
2253 			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2254 			if (!amdgpu_sriov_vf(adev))
2255 				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2256 			break;
2257 		case IP_VERSION(2, 0, 3):
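			/* no VCN/JPEG blocks are registered for VCN 2.0.3 */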
2258 			break;
2259 		case IP_VERSION(2, 5, 0):
2260 			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2261 			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2262 			break;
2263 		case IP_VERSION(2, 6, 0):
2264 			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2265 			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2266 			break;
2267 		case IP_VERSION(3, 0, 0):
2268 		case IP_VERSION(3, 0, 16):
2269 		case IP_VERSION(3, 1, 1):
2270 		case IP_VERSION(3, 1, 2):
2271 		case IP_VERSION(3, 0, 2):
2272 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2273 			if (!amdgpu_sriov_vf(adev))
2274 				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2275 			break;
2276 		case IP_VERSION(3, 0, 33):
2277 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2278 			break;
2279 		case IP_VERSION(4, 0, 0):
2280 		case IP_VERSION(4, 0, 2):
2281 		case IP_VERSION(4, 0, 4):
2282 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2283 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2284 			break;
2285 		case IP_VERSION(4, 0, 3):
2286 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2287 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2288 			break;
2289 		case IP_VERSION(4, 0, 5):
2290 		case IP_VERSION(4, 0, 6):
2291 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2292 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2293 			break;
2294 		case IP_VERSION(5, 0, 0):
2295 			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2296 			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
2297 			if (amdgpu_jpeg_test)
2298 				adev->enable_jpeg_test = true;
2299 			break;
2300 		default:
2301 			dev_err(adev->dev,
2302 				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2303 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2304 			return -EINVAL;
2305 		}
2306 	}
2307 	return 0;
2308 }
2309 
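/*
 * MES (micro engine scheduler) is only present on GFX11 and newer;
 * registering it also enables the MES KIQ path.
 */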
2310 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2311 {
2312 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2313 	case IP_VERSION(11, 0, 0):
2314 	case IP_VERSION(11, 0, 1):
2315 	case IP_VERSION(11, 0, 2):
2316 	case IP_VERSION(11, 0, 3):
2317 	case IP_VERSION(11, 0, 4):
2318 	case IP_VERSION(11, 5, 0):
2319 	case IP_VERSION(11, 5, 1):
2320 		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2321 		adev->enable_mes = true;
2322 		adev->enable_mes_kiq = true;
2323 		break;
2324 	case IP_VERSION(12, 0, 0):
2325 	case IP_VERSION(12, 0, 1):
2326 		amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
2327 		adev->enable_mes = true;
2328 		adev->enable_mes_kiq = true;
2329 		if (amdgpu_uni_mes)
2330 			adev->enable_uni_mes = true;
2331 		break;
2332 	default:
2333 		break;
2334 	}
2335 	return 0;
2336 }
2337 
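/* SOC partition configuration is currently only required for GC 9.4.3/9.4.4 */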
2338 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2339 {
2340 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2341 	case IP_VERSION(9, 4, 3):
2342 	case IP_VERSION(9, 4, 4):
2343 		aqua_vanjaram_init_soc_config(adev);
2344 		break;
2345 	default:
2346 		break;
2347 	}
2348 }
2349 
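/* VPE (video processing engine) is optional; unknown versions are not an error */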
2350 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2351 {
2352 	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2353 	case IP_VERSION(6, 1, 0):
2354 	case IP_VERSION(6, 1, 1):
2355 		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2356 		break;
2357 	default:
2358 		break;
2359 	}
2360 
2361 	return 0;
2362 }
2363 
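/*
 * UMSCH (user mode scheduler) for multimedia is opt-in: bit 0 of the
 * umsch_mm module parameter must be set.
 */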
2364 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2365 {
2366 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2367 	case IP_VERSION(4, 0, 5):
2368 	case IP_VERSION(4, 0, 6):
2369 		if (amdgpu_umsch_mm & 0x1) {
2370 			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2371 			adev->enable_umsch_mm = true;
2372 		}
2373 		break;
2374 	default:
2375 		break;
2376 	}
2377 
2378 	return 0;
2379 }
2380 
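/*
 * Top-level IP block discovery: IP versions come either from hardcoded
 * tables for pre-discovery ASICs or from the IP discovery binary, after
 * which the per-IP callbacks and IP blocks are registered in order.
 */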
2381 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2382 {
2383 	int r;
2384 
2385 	switch (adev->asic_type) {
2386 	case CHIP_VEGA10:
2387 		vega10_reg_base_init(adev);
2388 		adev->sdma.num_instances = 2;
2389 		adev->gmc.num_umc = 4;
2390 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2391 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2392 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2393 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2394 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2395 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2396 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2397 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2398 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2399 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2400 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2401 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2402 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2403 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2404 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2405 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2406 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2407 		break;
2408 	case CHIP_VEGA12:
2409 		vega10_reg_base_init(adev);
2410 		adev->sdma.num_instances = 2;
2411 		adev->gmc.num_umc = 4;
2412 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2413 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2414 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2415 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2416 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2417 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2418 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2419 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2420 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2421 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2422 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2423 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2424 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2425 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2426 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2427 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2428 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2429 		break;
2430 	case CHIP_RAVEN:
2431 		vega10_reg_base_init(adev);
2432 		adev->sdma.num_instances = 1;
2433 		adev->vcn.num_vcn_inst = 1;
2434 		adev->gmc.num_umc = 2;
2435 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2436 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2437 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2438 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2439 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2440 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2441 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2442 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2443 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2444 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2445 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2446 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2447 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2448 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2449 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2450 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2451 		} else {
2452 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2453 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2454 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2455 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2456 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2457 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2458 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2459 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2460 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2461 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2462 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2463 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2464 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2465 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2466 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2467 		}
2468 		break;
2469 	case CHIP_VEGA20:
2470 		vega20_reg_base_init(adev);
2471 		adev->sdma.num_instances = 2;
2472 		adev->gmc.num_umc = 8;
2473 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2474 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2475 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2476 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2477 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2478 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2479 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2480 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2481 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2482 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2483 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2484 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2485 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2486 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2487 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2488 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2489 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2490 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2491 		break;
2492 	case CHIP_ARCTURUS:
2493 		arct_reg_base_init(adev);
2494 		adev->sdma.num_instances = 8;
2495 		adev->vcn.num_vcn_inst = 2;
2496 		adev->gmc.num_umc = 8;
2497 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2498 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2499 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2500 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2501 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2502 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2503 		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2504 		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2505 		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2506 		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2507 		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2508 		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2509 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2510 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2511 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2512 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2513 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2514 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2515 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2516 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2517 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2518 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2519 		break;
2520 	case CHIP_ALDEBARAN:
2521 		aldebaran_reg_base_init(adev);
2522 		adev->sdma.num_instances = 5;
2523 		adev->vcn.num_vcn_inst = 2;
2524 		adev->gmc.num_umc = 4;
2525 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2526 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2527 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2528 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2529 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2530 		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2531 		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2532 		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2533 		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2534 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2535 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2536 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2537 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2538 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2539 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2540 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2541 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2542 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2543 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2544 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2545 		break;
2546 	default:
2547 		r = amdgpu_discovery_reg_base_init(adev);
2548 		if (r)
2549 			return -EINVAL;
2550 
2551 		amdgpu_discovery_harvest_ip(adev);
2552 		amdgpu_discovery_get_gfx_info(adev);
2553 		amdgpu_discovery_get_mall_info(adev);
2554 		amdgpu_discovery_get_vcn_info(adev);
2555 		break;
2556 	}
2557 
2558 	amdgpu_discovery_init_soc_config(adev);
2559 	amdgpu_discovery_sysfs_init(adev);
2560 
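	/* map the GC IP version to a chip family */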
2561 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2562 	case IP_VERSION(9, 0, 1):
2563 	case IP_VERSION(9, 2, 1):
2564 	case IP_VERSION(9, 4, 0):
2565 	case IP_VERSION(9, 4, 1):
2566 	case IP_VERSION(9, 4, 2):
2567 	case IP_VERSION(9, 4, 3):
2568 	case IP_VERSION(9, 4, 4):
2569 		adev->family = AMDGPU_FAMILY_AI;
2570 		break;
2571 	case IP_VERSION(9, 1, 0):
2572 	case IP_VERSION(9, 2, 2):
2573 	case IP_VERSION(9, 3, 0):
2574 		adev->family = AMDGPU_FAMILY_RV;
2575 		break;
2576 	case IP_VERSION(10, 1, 10):
2577 	case IP_VERSION(10, 1, 1):
2578 	case IP_VERSION(10, 1, 2):
2579 	case IP_VERSION(10, 1, 3):
2580 	case IP_VERSION(10, 1, 4):
2581 	case IP_VERSION(10, 3, 0):
2582 	case IP_VERSION(10, 3, 2):
2583 	case IP_VERSION(10, 3, 4):
2584 	case IP_VERSION(10, 3, 5):
2585 		adev->family = AMDGPU_FAMILY_NV;
2586 		break;
2587 	case IP_VERSION(10, 3, 1):
2588 		adev->family = AMDGPU_FAMILY_VGH;
2589 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2590 		break;
2591 	case IP_VERSION(10, 3, 3):
2592 		adev->family = AMDGPU_FAMILY_YC;
2593 		break;
2594 	case IP_VERSION(10, 3, 6):
2595 		adev->family = AMDGPU_FAMILY_GC_10_3_6;
2596 		break;
2597 	case IP_VERSION(10, 3, 7):
2598 		adev->family = AMDGPU_FAMILY_GC_10_3_7;
2599 		break;
2600 	case IP_VERSION(11, 0, 0):
2601 	case IP_VERSION(11, 0, 2):
2602 	case IP_VERSION(11, 0, 3):
2603 		adev->family = AMDGPU_FAMILY_GC_11_0_0;
2604 		break;
2605 	case IP_VERSION(11, 0, 1):
2606 	case IP_VERSION(11, 0, 4):
2607 		adev->family = AMDGPU_FAMILY_GC_11_0_1;
2608 		break;
2609 	case IP_VERSION(11, 5, 0):
2610 	case IP_VERSION(11, 5, 1):
2611 		adev->family = AMDGPU_FAMILY_GC_11_5_0;
2612 		break;
2613 	case IP_VERSION(12, 0, 0):
2614 	case IP_VERSION(12, 0, 1):
2615 		adev->family = AMDGPU_FAMILY_GC_12_0_0;
2616 		break;
2617 	default:
2618 		return -EINVAL;
2619 	}
2620 
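	/* flag APUs based on their GC IP version */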
2621 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2622 	case IP_VERSION(9, 1, 0):
2623 	case IP_VERSION(9, 2, 2):
2624 	case IP_VERSION(9, 3, 0):
2625 	case IP_VERSION(10, 1, 3):
2626 	case IP_VERSION(10, 1, 4):
2627 	case IP_VERSION(10, 3, 1):
2628 	case IP_VERSION(10, 3, 3):
2629 	case IP_VERSION(10, 3, 6):
2630 	case IP_VERSION(10, 3, 7):
2631 	case IP_VERSION(11, 0, 1):
2632 	case IP_VERSION(11, 0, 4):
2633 	case IP_VERSION(11, 5, 0):
2634 	case IP_VERSION(11, 5, 1):
2635 		adev->flags |= AMD_IS_APU;
2636 		break;
2637 	default:
2638 		break;
2639 	}
2640 
2641 	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
2642 		adev->gmc.xgmi.supported = true;
2643 
2644 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2645 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
2646 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
2647 
2648 	/* set NBIO version */
2649 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2650 	case IP_VERSION(6, 1, 0):
2651 	case IP_VERSION(6, 2, 0):
2652 		adev->nbio.funcs = &nbio_v6_1_funcs;
2653 		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2654 		break;
2655 	case IP_VERSION(7, 0, 0):
2656 	case IP_VERSION(7, 0, 1):
2657 	case IP_VERSION(2, 5, 0):
2658 		adev->nbio.funcs = &nbio_v7_0_funcs;
2659 		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2660 		break;
2661 	case IP_VERSION(7, 4, 0):
2662 	case IP_VERSION(7, 4, 1):
2663 	case IP_VERSION(7, 4, 4):
2664 		adev->nbio.funcs = &nbio_v7_4_funcs;
2665 		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2666 		break;
2667 	case IP_VERSION(7, 9, 0):
2668 		adev->nbio.funcs = &nbio_v7_9_funcs;
2669 		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2670 		break;
2671 	case IP_VERSION(7, 11, 0):
2672 	case IP_VERSION(7, 11, 1):
2673 		adev->nbio.funcs = &nbio_v7_11_funcs;
2674 		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2675 		break;
2676 	case IP_VERSION(7, 2, 0):
2677 	case IP_VERSION(7, 2, 1):
2678 	case IP_VERSION(7, 3, 0):
2679 	case IP_VERSION(7, 5, 0):
2680 	case IP_VERSION(7, 5, 1):
2681 		adev->nbio.funcs = &nbio_v7_2_funcs;
2682 		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2683 		break;
2684 	case IP_VERSION(2, 1, 1):
2685 	case IP_VERSION(2, 3, 0):
2686 	case IP_VERSION(2, 3, 1):
2687 	case IP_VERSION(2, 3, 2):
2688 	case IP_VERSION(3, 3, 0):
2689 	case IP_VERSION(3, 3, 1):
2690 	case IP_VERSION(3, 3, 2):
2691 	case IP_VERSION(3, 3, 3):
2692 		adev->nbio.funcs = &nbio_v2_3_funcs;
2693 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2694 		break;
2695 	case IP_VERSION(4, 3, 0):
2696 	case IP_VERSION(4, 3, 1):
2697 		if (amdgpu_sriov_vf(adev))
2698 			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2699 		else
2700 			adev->nbio.funcs = &nbio_v4_3_funcs;
2701 		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2702 		break;
2703 	case IP_VERSION(7, 7, 0):
2704 	case IP_VERSION(7, 7, 1):
2705 		adev->nbio.funcs = &nbio_v7_7_funcs;
2706 		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2707 		break;
2708 	case IP_VERSION(6, 3, 1):
2709 		adev->nbio.funcs = &nbif_v6_3_1_funcs;
2710 		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
2711 		break;
2712 	default:
2713 		break;
2714 	}
2715 
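	/* set HDP version */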
2716 	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2717 	case IP_VERSION(4, 0, 0):
2718 	case IP_VERSION(4, 0, 1):
2719 	case IP_VERSION(4, 1, 0):
2720 	case IP_VERSION(4, 1, 1):
2721 	case IP_VERSION(4, 1, 2):
2722 	case IP_VERSION(4, 2, 0):
2723 	case IP_VERSION(4, 2, 1):
2724 	case IP_VERSION(4, 4, 0):
2725 	case IP_VERSION(4, 4, 2):
2726 	case IP_VERSION(4, 4, 5):
2727 		adev->hdp.funcs = &hdp_v4_0_funcs;
2728 		break;
2729 	case IP_VERSION(5, 0, 0):
2730 	case IP_VERSION(5, 0, 1):
2731 	case IP_VERSION(5, 0, 2):
2732 	case IP_VERSION(5, 0, 3):
2733 	case IP_VERSION(5, 0, 4):
2734 	case IP_VERSION(5, 2, 0):
2735 		adev->hdp.funcs = &hdp_v5_0_funcs;
2736 		break;
2737 	case IP_VERSION(5, 2, 1):
2738 		adev->hdp.funcs = &hdp_v5_2_funcs;
2739 		break;
2740 	case IP_VERSION(6, 0, 0):
2741 	case IP_VERSION(6, 0, 1):
2742 	case IP_VERSION(6, 1, 0):
2743 		adev->hdp.funcs = &hdp_v6_0_funcs;
2744 		break;
2745 	case IP_VERSION(7, 0, 0):
2746 		adev->hdp.funcs = &hdp_v7_0_funcs;
2747 		break;
2748 	default:
2749 		break;
2750 	}
2751 
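	/* set DF version */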
2752 	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
2753 	case IP_VERSION(3, 6, 0):
2754 	case IP_VERSION(3, 6, 1):
2755 	case IP_VERSION(3, 6, 2):
2756 		adev->df.funcs = &df_v3_6_funcs;
2757 		break;
2758 	case IP_VERSION(2, 1, 0):
2759 	case IP_VERSION(2, 1, 1):
2760 	case IP_VERSION(2, 5, 0):
2761 	case IP_VERSION(3, 5, 1):
2762 	case IP_VERSION(3, 5, 2):
2763 		adev->df.funcs = &df_v1_7_funcs;
2764 		break;
2765 	case IP_VERSION(4, 3, 0):
2766 		adev->df.funcs = &df_v4_3_funcs;
2767 		break;
2768 	case IP_VERSION(4, 6, 2):
2769 		adev->df.funcs = &df_v4_6_2_funcs;
2770 		break;
2771 	default:
2772 		break;
2773 	}
2774 
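	/* set SMUIO version */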
2775 	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
2776 	case IP_VERSION(9, 0, 0):
2777 	case IP_VERSION(9, 0, 1):
2778 	case IP_VERSION(10, 0, 0):
2779 	case IP_VERSION(10, 0, 1):
2780 	case IP_VERSION(10, 0, 2):
2781 		adev->smuio.funcs = &smuio_v9_0_funcs;
2782 		break;
2783 	case IP_VERSION(11, 0, 0):
2784 	case IP_VERSION(11, 0, 2):
2785 	case IP_VERSION(11, 0, 3):
2786 	case IP_VERSION(11, 0, 4):
2787 	case IP_VERSION(11, 0, 7):
2788 	case IP_VERSION(11, 0, 8):
2789 		adev->smuio.funcs = &smuio_v11_0_funcs;
2790 		break;
2791 	case IP_VERSION(11, 0, 6):
2792 	case IP_VERSION(11, 0, 10):
2793 	case IP_VERSION(11, 0, 11):
2794 	case IP_VERSION(11, 5, 0):
2795 	case IP_VERSION(13, 0, 1):
2796 	case IP_VERSION(13, 0, 9):
2797 	case IP_VERSION(13, 0, 10):
2798 		adev->smuio.funcs = &smuio_v11_0_6_funcs;
2799 		break;
2800 	case IP_VERSION(13, 0, 2):
2801 		adev->smuio.funcs = &smuio_v13_0_funcs;
2802 		break;
2803 	case IP_VERSION(13, 0, 3):
2804 		adev->smuio.funcs = &smuio_v13_0_3_funcs;
		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
			adev->flags |= AMD_IS_APU;
2808 		break;
2809 	case IP_VERSION(13, 0, 6):
2810 	case IP_VERSION(13, 0, 8):
2811 	case IP_VERSION(14, 0, 0):
2812 	case IP_VERSION(14, 0, 1):
2813 		adev->smuio.funcs = &smuio_v13_0_6_funcs;
2814 		break;
2815 	case IP_VERSION(14, 0, 2):
2816 		adev->smuio.funcs = &smuio_v14_0_2_funcs;
2817 		break;
2818 	default:
2819 		break;
2820 	}
2821 
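	/* set LSDMA version */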
2822 	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
2823 	case IP_VERSION(6, 0, 0):
2824 	case IP_VERSION(6, 0, 1):
2825 	case IP_VERSION(6, 0, 2):
2826 	case IP_VERSION(6, 0, 3):
2827 		adev->lsdma.funcs = &lsdma_v6_0_funcs;
2828 		break;
2829 	case IP_VERSION(7, 0, 0):
2830 	case IP_VERSION(7, 0, 1):
2831 		adev->lsdma.funcs = &lsdma_v7_0_funcs;
2832 		break;
2833 	default:
2834 		break;
2835 	}
2836 
2837 	r = amdgpu_discovery_set_common_ip_blocks(adev);
2838 	if (r)
2839 		return r;
2840 
2841 	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
2842 	if (r)
2843 		return r;
2844 
2845 	/* For SR-IOV, PSP needs to be initialized before IH */
2846 	if (amdgpu_sriov_vf(adev)) {
2847 		r = amdgpu_discovery_set_psp_ip_blocks(adev);
2848 		if (r)
2849 			return r;
2850 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2851 		if (r)
2852 			return r;
2853 	} else {
2854 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2855 		if (r)
2856 			return r;
2857 
2858 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2859 			r = amdgpu_discovery_set_psp_ip_blocks(adev);
2860 			if (r)
2861 				return r;
2862 		}
2863 	}
2864 
2865 	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2866 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2867 		if (r)
2868 			return r;
2869 	}
2870 
2871 	r = amdgpu_discovery_set_display_ip_blocks(adev);
2872 	if (r)
2873 		return r;
2874 
2875 	r = amdgpu_discovery_set_gc_ip_blocks(adev);
2876 	if (r)
2877 		return r;
2878 
2879 	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
2880 	if (r)
2881 		return r;
2882 
2883 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
2884 	     !amdgpu_sriov_vf(adev)) ||
2885 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
2886 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2887 		if (r)
2888 			return r;
2889 	}
2890 
2891 	r = amdgpu_discovery_set_mm_ip_blocks(adev);
2892 	if (r)
2893 		return r;
2894 
2895 	r = amdgpu_discovery_set_mes_ip_blocks(adev);
2896 	if (r)
2897 		return r;
2898 
2899 	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
2900 	if (r)
2901 		return r;
2902 
2903 	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
2904 	if (r)
2905 		return r;
2906 
2907 	return 0;
2908 }
2909 
2910