/* xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
 * (revision 872c0de315d2ebad16d0ff574f8c9ce26dd5c6f2)
 */
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30 #include "amdgpu_ras.h"
31 
32 #include "soc15.h"
33 #include "gfx_v9_0.h"
34 #include "gfx_v9_4_3.h"
35 #include "gmc_v9_0.h"
36 #include "df_v1_7.h"
37 #include "df_v3_6.h"
38 #include "df_v4_3.h"
39 #include "df_v4_6_2.h"
40 #include "nbio_v6_1.h"
41 #include "nbio_v7_0.h"
42 #include "nbio_v7_4.h"
43 #include "nbio_v7_9.h"
44 #include "nbio_v7_11.h"
45 #include "hdp_v4_0.h"
46 #include "vega10_ih.h"
47 #include "vega20_ih.h"
48 #include "sdma_v4_0.h"
49 #include "sdma_v4_4_2.h"
50 #include "uvd_v7_0.h"
51 #include "vce_v4_0.h"
52 #include "vcn_v1_0.h"
53 #include "vcn_v2_5.h"
54 #include "jpeg_v2_5.h"
55 #include "smuio_v9_0.h"
56 #include "gmc_v10_0.h"
57 #include "gmc_v11_0.h"
58 #include "gmc_v12_0.h"
59 #include "gfxhub_v2_0.h"
60 #include "mmhub_v2_0.h"
61 #include "nbio_v2_3.h"
62 #include "nbio_v4_3.h"
63 #include "nbio_v7_2.h"
64 #include "nbio_v7_7.h"
65 #include "nbif_v6_3_1.h"
66 #include "hdp_v5_0.h"
67 #include "hdp_v5_2.h"
68 #include "hdp_v6_0.h"
69 #include "hdp_v7_0.h"
70 #include "nv.h"
71 #include "soc21.h"
72 #include "soc24.h"
73 #include "navi10_ih.h"
74 #include "ih_v6_0.h"
75 #include "ih_v6_1.h"
76 #include "ih_v7_0.h"
77 #include "gfx_v10_0.h"
78 #include "gfx_v11_0.h"
79 #include "gfx_v12_0.h"
80 #include "sdma_v5_0.h"
81 #include "sdma_v5_2.h"
82 #include "sdma_v6_0.h"
83 #include "sdma_v7_0.h"
84 #include "lsdma_v6_0.h"
85 #include "lsdma_v7_0.h"
86 #include "vcn_v2_0.h"
87 #include "jpeg_v2_0.h"
88 #include "vcn_v3_0.h"
89 #include "jpeg_v3_0.h"
90 #include "vcn_v4_0.h"
91 #include "jpeg_v4_0.h"
92 #include "vcn_v4_0_3.h"
93 #include "jpeg_v4_0_3.h"
94 #include "vcn_v4_0_5.h"
95 #include "jpeg_v4_0_5.h"
96 #include "amdgpu_vkms.h"
97 #include "mes_v10_1.h"
98 #include "mes_v11_0.h"
99 #include "mes_v12_0.h"
100 #include "smuio_v11_0.h"
101 #include "smuio_v11_0_6.h"
102 #include "smuio_v13_0.h"
103 #include "smuio_v13_0_3.h"
104 #include "smuio_v13_0_6.h"
105 #include "smuio_v14_0_2.h"
106 #include "vcn_v5_0_0.h"
107 #include "jpeg_v5_0_0.h"
108 
109 #include "amdgpu_vpe.h"
110 
111 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
112 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
113 
114 #define mmIP_DISCOVERY_VERSION  0x16A00
115 #define mmRCC_CONFIG_MEMSIZE	0xde3
116 #define mmMP0_SMN_C2PMSG_33	0x16061
117 #define mmMM_INDEX		0x0
118 #define mmMM_INDEX_HI		0x6
119 #define mmMM_DATA		0x1
120 
121 static const char *hw_id_names[HW_ID_MAX] = {
122 	[MP1_HWID]		= "MP1",
123 	[MP2_HWID]		= "MP2",
124 	[THM_HWID]		= "THM",
125 	[SMUIO_HWID]		= "SMUIO",
126 	[FUSE_HWID]		= "FUSE",
127 	[CLKA_HWID]		= "CLKA",
128 	[PWR_HWID]		= "PWR",
129 	[GC_HWID]		= "GC",
130 	[UVD_HWID]		= "UVD",
131 	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
132 	[ACP_HWID]		= "ACP",
133 	[DCI_HWID]		= "DCI",
134 	[DMU_HWID]		= "DMU",
135 	[DCO_HWID]		= "DCO",
136 	[DIO_HWID]		= "DIO",
137 	[XDMA_HWID]		= "XDMA",
138 	[DCEAZ_HWID]		= "DCEAZ",
139 	[DAZ_HWID]		= "DAZ",
140 	[SDPMUX_HWID]		= "SDPMUX",
141 	[NTB_HWID]		= "NTB",
142 	[IOHC_HWID]		= "IOHC",
143 	[L2IMU_HWID]		= "L2IMU",
144 	[VCE_HWID]		= "VCE",
145 	[MMHUB_HWID]		= "MMHUB",
146 	[ATHUB_HWID]		= "ATHUB",
147 	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
148 	[DFX_HWID]		= "DFX",
149 	[DBGU0_HWID]		= "DBGU0",
150 	[DBGU1_HWID]		= "DBGU1",
151 	[OSSSYS_HWID]		= "OSSSYS",
152 	[HDP_HWID]		= "HDP",
153 	[SDMA0_HWID]		= "SDMA0",
154 	[SDMA1_HWID]		= "SDMA1",
155 	[SDMA2_HWID]		= "SDMA2",
156 	[SDMA3_HWID]		= "SDMA3",
157 	[LSDMA_HWID]		= "LSDMA",
158 	[ISP_HWID]		= "ISP",
159 	[DBGU_IO_HWID]		= "DBGU_IO",
160 	[DF_HWID]		= "DF",
161 	[CLKB_HWID]		= "CLKB",
162 	[FCH_HWID]		= "FCH",
163 	[DFX_DAP_HWID]		= "DFX_DAP",
164 	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
165 	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
166 	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
167 	[L1IMU3_HWID]		= "L1IMU3",
168 	[L1IMU4_HWID]		= "L1IMU4",
169 	[L1IMU5_HWID]		= "L1IMU5",
170 	[L1IMU6_HWID]		= "L1IMU6",
171 	[L1IMU7_HWID]		= "L1IMU7",
172 	[L1IMU8_HWID]		= "L1IMU8",
173 	[L1IMU9_HWID]		= "L1IMU9",
174 	[L1IMU10_HWID]		= "L1IMU10",
175 	[L1IMU11_HWID]		= "L1IMU11",
176 	[L1IMU12_HWID]		= "L1IMU12",
177 	[L1IMU13_HWID]		= "L1IMU13",
178 	[L1IMU14_HWID]		= "L1IMU14",
179 	[L1IMU15_HWID]		= "L1IMU15",
180 	[WAFLC_HWID]		= "WAFLC",
181 	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
182 	[PCIE_HWID]		= "PCIE",
183 	[PCS_HWID]		= "PCS",
184 	[DDCL_HWID]		= "DDCL",
185 	[SST_HWID]		= "SST",
186 	[IOAGR_HWID]		= "IOAGR",
187 	[NBIF_HWID]		= "NBIF",
188 	[IOAPIC_HWID]		= "IOAPIC",
189 	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
190 	[NTBCCP_HWID]		= "NTBCCP",
191 	[UMC_HWID]		= "UMC",
192 	[SATA_HWID]		= "SATA",
193 	[USB_HWID]		= "USB",
194 	[CCXSEC_HWID]		= "CCXSEC",
195 	[XGMI_HWID]		= "XGMI",
196 	[XGBE_HWID]		= "XGBE",
197 	[MP0_HWID]		= "MP0",
198 	[VPE_HWID]		= "VPE",
199 };
200 
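/* Map the driver's logical HWIP identifiers to the physical HWIDs used
 * in the discovery table.  Note that several HWIPs share one HWID:
 * both NBIO_HWIP and NBIF_HWIP map to NBIF_HWID, and CLK_HWIP maps to
 * CLKA_HWID.
 */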
201 static int hw_id_map[MAX_HWIP] = {
202 	[GC_HWIP]	= GC_HWID,
203 	[HDP_HWIP]	= HDP_HWID,
204 	[SDMA0_HWIP]	= SDMA0_HWID,
205 	[SDMA1_HWIP]	= SDMA1_HWID,
206 	[SDMA2_HWIP]    = SDMA2_HWID,
207 	[SDMA3_HWIP]    = SDMA3_HWID,
208 	[LSDMA_HWIP]    = LSDMA_HWID,
209 	[MMHUB_HWIP]	= MMHUB_HWID,
210 	[ATHUB_HWIP]	= ATHUB_HWID,
211 	[NBIO_HWIP]	= NBIF_HWID,
212 	[MP0_HWIP]	= MP0_HWID,
213 	[MP1_HWIP]	= MP1_HWID,
214 	[UVD_HWIP]	= UVD_HWID,
215 	[VCE_HWIP]	= VCE_HWID,
216 	[DF_HWIP]	= DF_HWID,
217 	[DCE_HWIP]	= DMU_HWID,
218 	[OSSSYS_HWIP]	= OSSSYS_HWID,
219 	[SMUIO_HWIP]	= SMUIO_HWID,
220 	[PWR_HWIP]	= PWR_HWID,
221 	[NBIF_HWIP]	= NBIF_HWID,
222 	[THM_HWIP]	= THM_HWID,
223 	[CLK_HWIP]	= CLKA_HWID,
224 	[UMC_HWIP]	= UMC_HWID,
225 	[XGMI_HWIP]	= XGMI_HWID,
226 	[DCI_HWIP]	= DCI_HWID,
227 	[PCIE_HWIP]	= PCIE_HWID,
228 	[VPE_HWIP]	= VPE_HWID,
229 };
230 
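/* Read the discovery blob out of system memory.  The blob sits
 * DISCOVERY_TMR_OFFSET bytes from the end of the TMR region reported by
 * ACPI; this path is used when mmRCC_CONFIG_MEMSIZE reports no VRAM.
 */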
231 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
232 {
233 	u64 tmr_offset, tmr_size, pos;
234 	void *discv_regn;
235 	int ret;
236 
237 	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
238 	if (ret)
239 		return ret;
240 
241 	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
242 
243 	/* This region is read-only and reserved from system use */
244 	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
245 	if (discv_regn) {
246 		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
247 		memunmap(discv_regn);
248 		return 0;
249 	}
250 
251 	return -ENOENT;
252 }
253 
254 #define IP_DISCOVERY_V2		2
255 #define IP_DISCOVERY_V4		4
256 
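/* Read the discovery blob from the device: poll until IFWI init has
 * finished, then copy the blob from the top of VRAM, falling back to
 * system memory when no VRAM is reported.
 */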
257 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
258 						 uint8_t *binary)
259 {
260 	uint64_t vram_size;
261 	u32 msg;
262 	int i, ret = 0;
263 
	/* IFWI init can take up to a second to complete on some dGPUs, but
	 * it generally finishes within 60-100ms.  It starts as soon as the
	 * device gets power, so by the time the OS loads it has normally
	 * long since completed.  However, when a card is hotplugged, e.g.
	 * via USB4, we need to wait for it to complete.  Once the C2PMSG
	 * register is updated, we can continue.
	 */
271 
272 	for (i = 0; i < 1000; i++) {
273 		msg = RREG32(mmMP0_SMN_C2PMSG_33);
274 		if (msg & 0x80000000)
275 			break;
276 		usleep_range(1000, 1100);
277 	}
278 
279 	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
280 
281 	if (vram_size) {
282 		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
283 		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
284 					  adev->mman.discovery_tmr_size, false);
285 	} else {
286 		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
287 	}
288 
289 	return ret;
290 }
291 
292 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
293 {
294 	const struct firmware *fw;
295 	const char *fw_name;
296 	int r;
297 
298 	switch (amdgpu_discovery) {
299 	case 2:
300 		fw_name = FIRMWARE_IP_DISCOVERY;
301 		break;
302 	default:
303 		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
304 		return -EINVAL;
305 	}
306 
307 	r = request_firmware(&fw, fw_name, adev->dev);
308 	if (r) {
309 		dev_err(adev->dev, "can't load firmware \"%s\"\n",
310 			fw_name);
311 		return r;
312 	}
313 
	/* Don't overflow the TMR-sized buffer the caller allocated */
	if (fw->size > adev->mman.discovery_tmr_size) {
		dev_err(adev->dev, "discovery binary is too large: %zu\n",
			fw->size);
		release_firmware(fw);
		return -EINVAL;
	}

	memcpy(binary, fw->data, fw->size);
315 	release_firmware(fw);
316 
317 	return 0;
318 }
319 
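/* The discovery binary uses a plain 16-bit byte-sum checksum: e.g. for
 * the bytes { 0x01, 0x02, 0xFF } the checksum is 0x0102.
 */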
320 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
321 {
322 	uint16_t checksum = 0;
323 	int i;
324 
325 	for (i = 0; i < size; i++)
326 		checksum += data[i];
327 
328 	return checksum;
329 }
330 
331 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
332 						    uint16_t expected)
333 {
	return amdgpu_discovery_calculate_checksum(data, size) == expected;
335 }
336 
337 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
338 {
339 	struct binary_header *bhdr;
340 	bhdr = (struct binary_header *)binary;
341 
342 	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
343 }
344 
345 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
346 {
347 	/*
	 * So far, apply this quirk only to those Navy Flounder boards whose
	 * harvest table reports a bad VCN configuration.
350 	 */
351 	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
352 	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
353 		switch (adev->pdev->revision) {
354 		case 0xC1:
355 		case 0xC2:
356 		case 0xC3:
357 		case 0xC5:
358 		case 0xC7:
359 		case 0xCF:
360 		case 0xDF:
361 			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
362 			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
363 			break;
364 		default:
365 			break;
366 		}
367 	}
368 }
369 
370 static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
371 					   struct binary_header *bhdr)
372 {
	struct nps_info_header *nhdr;
	struct table_info *info;
374 	uint16_t checksum;
375 	uint16_t offset;
376 
377 	info = &bhdr->table_list[NPS_INFO];
378 	offset = le16_to_cpu(info->offset);
379 	checksum = le16_to_cpu(info->checksum);
380 
	nhdr = (struct nps_info_header *)(adev->mman.discovery_bin + offset);
383 
384 	if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
385 		dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
386 		return -EINVAL;
387 	}
388 
389 	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
390 					      le32_to_cpu(nhdr->size_bytes),
391 					      checksum)) {
392 		dev_dbg(adev->dev, "invalid nps info data table checksum\n");
393 		return -EINVAL;
394 	}
395 
396 	return 0;
397 }
398 
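/* Allocate the discovery buffer, load the blob (from file when
 * amdgpu_discovery == 2, otherwise from device memory), then validate
 * the binary signature and each table's checksum before use.
 */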
399 static int amdgpu_discovery_init(struct amdgpu_device *adev)
400 {
401 	struct table_info *info;
402 	struct binary_header *bhdr;
403 	uint16_t offset;
404 	uint16_t size;
405 	uint16_t checksum;
406 	int r;
407 
408 	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
409 	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
410 	if (!adev->mman.discovery_bin)
411 		return -ENOMEM;
412 
413 	/* Read from file if it is the preferred option */
414 	if (amdgpu_discovery == 2) {
		dev_info(adev->dev, "use ip discovery information from file\n");
416 		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
417 
418 		if (r) {
419 			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
420 			r = -EINVAL;
421 			goto out;
422 		}
423 
424 	} else {
425 		r = amdgpu_discovery_read_binary_from_mem(
426 			adev, adev->mman.discovery_bin);
427 		if (r)
428 			goto out;
429 	}
430 
431 	/* check the ip discovery binary signature */
432 	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
433 		dev_err(adev->dev,
			"invalid ip discovery binary signature\n");
435 		r = -EINVAL;
436 		goto out;
437 	}
438 
439 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
440 
441 	offset = offsetof(struct binary_header, binary_checksum) +
442 		sizeof(bhdr->binary_checksum);
443 	size = le16_to_cpu(bhdr->binary_size) - offset;
444 	checksum = le16_to_cpu(bhdr->binary_checksum);
445 
446 	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
447 					      size, checksum)) {
448 		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
449 		r = -EINVAL;
450 		goto out;
451 	}
452 
453 	info = &bhdr->table_list[IP_DISCOVERY];
454 	offset = le16_to_cpu(info->offset);
455 	checksum = le16_to_cpu(info->checksum);
456 
457 	if (offset) {
458 		struct ip_discovery_header *ihdr =
459 			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
460 		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
461 			dev_err(adev->dev, "invalid ip discovery data table signature\n");
462 			r = -EINVAL;
463 			goto out;
464 		}
465 
466 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
467 						      le16_to_cpu(ihdr->size), checksum)) {
468 			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
469 			r = -EINVAL;
470 			goto out;
471 		}
472 	}
473 
474 	info = &bhdr->table_list[GC];
475 	offset = le16_to_cpu(info->offset);
476 	checksum = le16_to_cpu(info->checksum);
477 
478 	if (offset) {
479 		struct gpu_info_header *ghdr =
480 			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);
481 
482 		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
483 			dev_err(adev->dev, "invalid ip discovery gc table id\n");
484 			r = -EINVAL;
485 			goto out;
486 		}
487 
488 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
489 						      le32_to_cpu(ghdr->size), checksum)) {
490 			dev_err(adev->dev, "invalid gc data table checksum\n");
491 			r = -EINVAL;
492 			goto out;
493 		}
494 	}
495 
496 	info = &bhdr->table_list[HARVEST_INFO];
497 	offset = le16_to_cpu(info->offset);
498 	checksum = le16_to_cpu(info->checksum);
499 
500 	if (offset) {
501 		struct harvest_info_header *hhdr =
502 			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);
503 
504 		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
505 			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
506 			r = -EINVAL;
507 			goto out;
508 		}
509 
510 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
511 						      sizeof(struct harvest_table), checksum)) {
512 			dev_err(adev->dev, "invalid harvest data table checksum\n");
513 			r = -EINVAL;
514 			goto out;
515 		}
516 	}
517 
518 	info = &bhdr->table_list[VCN_INFO];
519 	offset = le16_to_cpu(info->offset);
520 	checksum = le16_to_cpu(info->checksum);
521 
522 	if (offset) {
523 		struct vcn_info_header *vhdr =
524 			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);
525 
526 		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
527 			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
528 			r = -EINVAL;
529 			goto out;
530 		}
531 
532 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
533 						      le32_to_cpu(vhdr->size_bytes), checksum)) {
534 			dev_err(adev->dev, "invalid vcn data table checksum\n");
535 			r = -EINVAL;
536 			goto out;
537 		}
538 	}
539 
540 	info = &bhdr->table_list[MALL_INFO];
541 	offset = le16_to_cpu(info->offset);
542 	checksum = le16_to_cpu(info->checksum);
543 
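	/* Note: the "0 &&" below intentionally disables MALL info
	 * verification, leaving the checks as dead code for reference;
	 * the reason is not recorded in this file.
	 */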
544 	if (0 && offset) {
545 		struct mall_info_header *mhdr =
546 			(struct mall_info_header *)(adev->mman.discovery_bin + offset);
547 
548 		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
549 			dev_err(adev->dev, "invalid ip discovery mall table id\n");
550 			r = -EINVAL;
551 			goto out;
552 		}
553 
554 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
555 						      le32_to_cpu(mhdr->size_bytes), checksum)) {
556 			dev_err(adev->dev, "invalid mall data table checksum\n");
557 			r = -EINVAL;
558 			goto out;
559 		}
560 	}
561 
562 	return 0;
563 
564 out:
565 	kfree(adev->mman.discovery_bin);
566 	adev->mman.discovery_bin = NULL;
567 	if ((amdgpu_discovery != 2) &&
568 	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
569 		amdgpu_ras_query_boot_status(adev, 4);
570 	return r;
571 }
572 
573 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
574 
575 void amdgpu_discovery_fini(struct amdgpu_device *adev)
576 {
577 	amdgpu_discovery_sysfs_fini(adev);
578 	kfree(adev->mman.discovery_bin);
579 	adev->mman.discovery_bin = NULL;
580 }
581 
582 static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
583 {
584 	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
585 		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
586 			  ip->instance_number);
587 		return -EINVAL;
588 	}
589 	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
590 		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
591 			  le16_to_cpu(ip->hw_id));
592 		return -EINVAL;
593 	}
594 
595 	return 0;
596 }
597 
598 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
599 						uint32_t *vcn_harvest_count)
600 {
601 	struct binary_header *bhdr;
602 	struct ip_discovery_header *ihdr;
603 	struct die_header *dhdr;
604 	struct ip_v4 *ip;
605 	uint16_t die_offset, ip_offset, num_dies, num_ips;
606 	int i, j;
607 
608 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
609 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
610 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
611 	num_dies = le16_to_cpu(ihdr->num_dies);
612 
	/* scan the harvest bit of all IP data structures */
614 	for (i = 0; i < num_dies; i++) {
615 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
616 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
617 		num_ips = le16_to_cpu(dhdr->num_ips);
618 		ip_offset = die_offset + sizeof(*dhdr);
619 
620 		for (j = 0; j < num_ips; j++) {
621 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
622 
623 			if (amdgpu_discovery_validate_ip(ip))
624 				goto next_ip;
625 
626 			if (le16_to_cpu(ip->variant) == 1) {
627 				switch (le16_to_cpu(ip->hw_id)) {
628 				case VCN_HWID:
629 					(*vcn_harvest_count)++;
630 					if (ip->instance_number == 0) {
631 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
632 						adev->vcn.inst_mask &=
633 							~AMDGPU_VCN_HARVEST_VCN0;
634 						adev->jpeg.inst_mask &=
635 							~AMDGPU_VCN_HARVEST_VCN0;
636 					} else {
637 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
638 						adev->vcn.inst_mask &=
639 							~AMDGPU_VCN_HARVEST_VCN1;
640 						adev->jpeg.inst_mask &=
641 							~AMDGPU_VCN_HARVEST_VCN1;
642 					}
643 					break;
644 				case DMU_HWID:
645 					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
646 					break;
647 				default:
648 					break;
649 				}
650 			}
651 next_ip:
652 			if (ihdr->base_addr_64_bit)
653 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
654 			else
655 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
656 		}
657 	}
658 }
659 
660 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
661 						     uint32_t *vcn_harvest_count,
662 						     uint32_t *umc_harvest_count)
663 {
664 	struct binary_header *bhdr;
665 	struct harvest_table *harvest_info;
666 	u16 offset;
667 	int i;
668 	uint32_t umc_harvest_config = 0;
669 
670 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
671 	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
672 
673 	if (!offset) {
674 		dev_err(adev->dev, "invalid harvest table offset\n");
675 		return;
676 	}
677 
678 	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
679 
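	/* The harvest table holds at most 32 entries; an entry with a zero
	 * hw_id terminates the list.
	 */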
680 	for (i = 0; i < 32; i++) {
681 		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
682 			break;
683 
684 		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
685 		case VCN_HWID:
686 			(*vcn_harvest_count)++;
687 			adev->vcn.harvest_config |=
688 				(1 << harvest_info->list[i].number_instance);
689 			adev->jpeg.harvest_config |=
690 				(1 << harvest_info->list[i].number_instance);
691 
692 			adev->vcn.inst_mask &=
693 				~(1U << harvest_info->list[i].number_instance);
694 			adev->jpeg.inst_mask &=
695 				~(1U << harvest_info->list[i].number_instance);
696 			break;
697 		case DMU_HWID:
698 			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
699 			break;
700 		case UMC_HWID:
701 			umc_harvest_config |=
702 				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
703 			(*umc_harvest_count)++;
704 			break;
705 		case GC_HWID:
706 			adev->gfx.xcc_mask &=
707 				~(1U << harvest_info->list[i].number_instance);
708 			break;
709 		case SDMA0_HWID:
710 			adev->sdma.sdma_mask &=
711 				~(1U << harvest_info->list[i].number_instance);
712 			break;
713 		default:
714 			break;
715 		}
716 	}
717 
718 	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
719 				~umc_harvest_config;
720 }
721 
722 /* ================================================== */
723 
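/* The parsed discovery table is exposed through sysfs as a kobject
 * hierarchy.  An illustrative path (assuming a single die and the GC
 * block) would be:
 *
 *   /sys/class/drm/card0/device/ip_discovery/die/0/GC/0/major
 */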
724 struct ip_hw_instance {
725 	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
726 
727 	int hw_id;
728 	u8  num_instance;
729 	u8  major, minor, revision;
730 	u8  harvest;
731 
732 	int num_base_addresses;
733 	u32 base_addr[] __counted_by(num_base_addresses);
734 };
735 
736 struct ip_hw_id {
737 	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
738 	int hw_id;
739 };
740 
741 struct ip_die_entry {
742 	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
743 	u16 num_ips;
744 };
745 
746 /* -------------------------------------------------- */
747 
748 struct ip_hw_instance_attr {
749 	struct attribute attr;
750 	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
751 };
752 
753 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
754 {
755 	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
756 }
757 
758 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
759 {
760 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
761 }
762 
763 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
764 {
765 	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
766 }
767 
768 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
769 {
770 	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
771 }
772 
773 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
774 {
775 	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
776 }
777 
778 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
779 {
780 	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
781 }
782 
783 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
784 {
785 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
786 }
787 
788 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
789 {
790 	ssize_t res, at;
791 	int ii;
792 
793 	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Stop before at + size would exceed PAGE_SIZE; each entry
		 * ("0x%08X\n") needs at most 12 bytes, including the NUL.
		 */
796 		if (at + 12 > PAGE_SIZE)
797 			break;
798 		res = sysfs_emit_at(buf, at, "0x%08X\n",
799 				    ip_hw_instance->base_addr[ii]);
800 		if (res <= 0)
801 			break;
802 		at += res;
803 	}
804 
805 	return res < 0 ? res : at;
806 }
807 
808 static struct ip_hw_instance_attr ip_hw_attr[] = {
809 	__ATTR_RO(hw_id),
810 	__ATTR_RO(num_instance),
811 	__ATTR_RO(major),
812 	__ATTR_RO(minor),
813 	__ATTR_RO(revision),
814 	__ATTR_RO(harvest),
815 	__ATTR_RO(num_base_addresses),
816 	__ATTR_RO(base_addr),
817 };
818 
819 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
820 ATTRIBUTE_GROUPS(ip_hw_instance);
821 
822 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
823 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
824 
825 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
826 					struct attribute *attr,
827 					char *buf)
828 {
829 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
830 	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
831 
832 	if (!ip_hw_attr->show)
833 		return -EIO;
834 
835 	return ip_hw_attr->show(ip_hw_instance, buf);
836 }
837 
838 static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
839 	.show = ip_hw_instance_attr_show,
840 };
841 
842 static void ip_hw_instance_release(struct kobject *kobj)
843 {
844 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
845 
846 	kfree(ip_hw_instance);
847 }
848 
849 static const struct kobj_type ip_hw_instance_ktype = {
850 	.release = ip_hw_instance_release,
851 	.sysfs_ops = &ip_hw_instance_sysfs_ops,
852 	.default_groups = ip_hw_instance_groups,
853 };
854 
855 /* -------------------------------------------------- */
856 
857 #define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
858 
859 static void ip_hw_id_release(struct kobject *kobj)
860 {
861 	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
862 
863 	if (!list_empty(&ip_hw_id->hw_id_kset.list))
864 		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
865 	kfree(ip_hw_id);
866 }
867 
868 static const struct kobj_type ip_hw_id_ktype = {
869 	.release = ip_hw_id_release,
870 	.sysfs_ops = &kobj_sysfs_ops,
871 };
872 
873 /* -------------------------------------------------- */
874 
875 static void die_kobj_release(struct kobject *kobj);
876 static void ip_disc_release(struct kobject *kobj);
877 
878 struct ip_die_entry_attribute {
879 	struct attribute attr;
880 	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
881 };
882 
883 #define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)
884 
885 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
886 {
887 	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
888 }
889 
890 /* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
892  * ip_die_entry_attrs in a loop.
893  */
894 static struct ip_die_entry_attribute num_ips_attr =
895 	__ATTR_RO(num_ips);
896 
897 static struct attribute *ip_die_entry_attrs[] = {
898 	&num_ips_attr.attr,
899 	NULL,
900 };
901 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
902 
903 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
904 
905 static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
906 				      struct attribute *attr,
907 				      char *buf)
908 {
909 	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
910 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
911 
912 	if (!ip_die_entry_attr->show)
913 		return -EIO;
914 
915 	return ip_die_entry_attr->show(ip_die_entry, buf);
916 }
917 
918 static void ip_die_entry_release(struct kobject *kobj)
919 {
920 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
921 
922 	if (!list_empty(&ip_die_entry->ip_kset.list))
923 		DRM_ERROR("ip_die_entry->ip_kset is not empty");
924 	kfree(ip_die_entry);
925 }
926 
927 static const struct sysfs_ops ip_die_entry_sysfs_ops = {
928 	.show = ip_die_entry_attr_show,
929 };
930 
931 static const struct kobj_type ip_die_entry_ktype = {
932 	.release = ip_die_entry_release,
933 	.sysfs_ops = &ip_die_entry_sysfs_ops,
934 	.default_groups = ip_die_entry_groups,
935 };
936 
937 static const struct kobj_type die_kobj_ktype = {
938 	.release = die_kobj_release,
939 	.sysfs_ops = &kobj_sysfs_ops,
940 };
941 
942 static const struct kobj_type ip_discovery_ktype = {
943 	.release = ip_disc_release,
944 	.sysfs_ops = &kobj_sysfs_ops,
945 };
946 
947 struct ip_discovery_top {
948 	struct kobject kobj;    /* ip_discovery/ */
949 	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
950 	struct amdgpu_device *adev;
951 };
952 
953 static void die_kobj_release(struct kobject *kobj)
954 {
955 	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
956 						       struct ip_discovery_top,
957 						       die_kset);
958 	if (!list_empty(&ip_top->die_kset.list))
959 		DRM_ERROR("ip_top->die_kset is not empty");
960 }
961 
962 static void ip_disc_release(struct kobject *kobj)
963 {
964 	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
965 						       kobj);
966 	struct amdgpu_device *adev = ip_top->adev;
967 
968 	adev->ip_top = NULL;
969 	kfree(ip_top);
970 }
971 
972 static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
973 						 uint16_t hw_id, uint8_t inst)
974 {
975 	uint8_t harvest = 0;
976 
	/* Until a uniform way is figured out, get the mask based on hwid */
978 	switch (hw_id) {
979 	case VCN_HWID:
980 		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
981 		break;
982 	case DMU_HWID:
983 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
984 			harvest = 0x1;
985 		break;
986 	case UMC_HWID:
		/* TODO: UMC harvest info needs additional parsing; ignore it for now. */
988 		break;
989 	case GC_HWID:
990 		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
991 		break;
992 	case SDMA0_HWID:
993 		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
994 		break;
995 	default:
996 		break;
997 	}
998 
999 	return harvest;
1000 }
1001 
1002 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
1003 				      struct ip_die_entry *ip_die_entry,
1004 				      const size_t _ip_offset, const int num_ips,
1005 				      bool reg_base_64)
1006 {
1007 	int ii, jj, kk, res;
1008 
1009 	DRM_DEBUG("num_ips:%d", num_ips);
1010 
1011 	/* Find all IPs of a given HW ID, and add their instance to
1012 	 * #die/#hw_id/#instance/<attributes>
1013 	 */
1014 	for (ii = 0; ii < HW_ID_MAX; ii++) {
1015 		struct ip_hw_id *ip_hw_id = NULL;
1016 		size_t ip_offset = _ip_offset;
1017 
1018 		for (jj = 0; jj < num_ips; jj++) {
1019 			struct ip_v4 *ip;
1020 			struct ip_hw_instance *ip_hw_instance;
1021 
1022 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1023 			if (amdgpu_discovery_validate_ip(ip) ||
1024 			    le16_to_cpu(ip->hw_id) != ii)
1025 				goto next_ip;
1026 
1027 			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
1028 
1029 			/* We have a hw_id match; register the hw
1030 			 * block if not yet registered.
1031 			 */
1032 			if (!ip_hw_id) {
1033 				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
1034 				if (!ip_hw_id)
1035 					return -ENOMEM;
1036 				ip_hw_id->hw_id = ii;
1037 
1038 				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
1039 				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
1040 				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
1041 				res = kset_register(&ip_hw_id->hw_id_kset);
1042 				if (res) {
1043 					DRM_ERROR("Couldn't register ip_hw_id kset");
1044 					kfree(ip_hw_id);
1045 					return res;
1046 				}
1047 				if (hw_id_names[ii]) {
1048 					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
1049 								&ip_hw_id->hw_id_kset.kobj,
1050 								hw_id_names[ii]);
1051 					if (res) {
1052 						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
1053 							  hw_id_names[ii],
1054 							  kobject_name(&ip_die_entry->ip_kset.kobj));
1055 					}
1056 				}
1057 			}
1058 
1059 			/* Now register its instance.
1060 			 */
1061 			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
1062 							     base_addr,
1063 							     ip->num_base_address),
1064 						 GFP_KERNEL);
1065 			if (!ip_hw_instance) {
1066 				DRM_ERROR("no memory for ip_hw_instance");
1067 				return -ENOMEM;
1068 			}
1069 			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1070 			ip_hw_instance->num_instance = ip->instance_number;
1071 			ip_hw_instance->major = ip->major;
1072 			ip_hw_instance->minor = ip->minor;
1073 			ip_hw_instance->revision = ip->revision;
1074 			ip_hw_instance->harvest =
1075 				amdgpu_discovery_get_harvest_info(
1076 					adev, ip_hw_instance->hw_id,
1077 					ip_hw_instance->num_instance);
1078 			ip_hw_instance->num_base_addresses = ip->num_base_address;
1079 
1080 			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1081 				if (reg_base_64)
1082 					ip_hw_instance->base_addr[kk] =
1083 						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1084 				else
1085 					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1086 			}
1087 
1088 			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1089 			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1090 			res = kobject_add(&ip_hw_instance->kobj, NULL,
1091 					  "%d", ip_hw_instance->num_instance);
1092 next_ip:
1093 			if (reg_base_64)
1094 				ip_offset += struct_size(ip, base_address_64,
1095 							 ip->num_base_address);
1096 			else
1097 				ip_offset += struct_size(ip, base_address,
1098 							 ip->num_base_address);
1099 		}
1100 	}
1101 
1102 	return 0;
1103 }
1104 
1105 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1106 {
1107 	struct binary_header *bhdr;
1108 	struct ip_discovery_header *ihdr;
1109 	struct die_header *dhdr;
1110 	struct kset *die_kset = &adev->ip_top->die_kset;
1111 	u16 num_dies, die_offset, num_ips;
1112 	size_t ip_offset;
1113 	int ii, res;
1114 
1115 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1116 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1117 					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1118 	num_dies = le16_to_cpu(ihdr->num_dies);
1119 
1120 	DRM_DEBUG("number of dies: %d\n", num_dies);
1121 
1122 	for (ii = 0; ii < num_dies; ii++) {
1123 		struct ip_die_entry *ip_die_entry;
1124 
1125 		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1126 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1127 		num_ips = le16_to_cpu(dhdr->num_ips);
1128 		ip_offset = die_offset + sizeof(*dhdr);
1129 
1130 		/* Add the die to the kset.
1131 		 *
1132 		 * dhdr->die_id == ii, which was checked in
1133 		 * amdgpu_discovery_reg_base_init().
1134 		 */
1135 
1136 		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1137 		if (!ip_die_entry)
1138 			return -ENOMEM;
1139 
1140 		ip_die_entry->num_ips = num_ips;
1141 
1142 		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1143 		ip_die_entry->ip_kset.kobj.kset = die_kset;
1144 		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1145 		res = kset_register(&ip_die_entry->ip_kset);
1146 		if (res) {
1147 			DRM_ERROR("Couldn't register ip_die_entry kset");
1148 			kfree(ip_die_entry);
1149 			return res;
1150 		}
1151 
1152 		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1153 	}
1154 
1155 	return 0;
1156 }
1157 
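/* Create the ip_discovery/ kobject under the device, register the die
 * kset, and recurse through the discovery table to populate the tree.
 */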
1158 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1159 {
1160 	struct kset *die_kset;
1161 	int res, ii;
1162 
1163 	if (!adev->mman.discovery_bin)
1164 		return -EINVAL;
1165 
1166 	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1167 	if (!adev->ip_top)
1168 		return -ENOMEM;
1169 
1170 	adev->ip_top->adev = adev;
1171 
1172 	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1173 				   &adev->dev->kobj, "ip_discovery");
1174 	if (res) {
1175 		DRM_ERROR("Couldn't init and add ip_discovery/");
1176 		goto Err;
1177 	}
1178 
1179 	die_kset = &adev->ip_top->die_kset;
1180 	kobject_set_name(&die_kset->kobj, "%s", "die");
1181 	die_kset->kobj.parent = &adev->ip_top->kobj;
1182 	die_kset->kobj.ktype = &die_kobj_ktype;
1183 	res = kset_register(&adev->ip_top->die_kset);
1184 	if (res) {
1185 		DRM_ERROR("Couldn't register die_kset");
1186 		goto Err;
1187 	}
1188 
1189 	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1190 		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1191 	ip_hw_instance_attrs[ii] = NULL;
1192 
1193 	res = amdgpu_discovery_sysfs_recurse(adev);
1194 
1195 	return res;
1196 Err:
1197 	kobject_put(&adev->ip_top->kobj);
1198 	return res;
1199 }
1200 
1201 /* -------------------------------------------------- */
1202 
1203 #define list_to_kobj(el) container_of(el, struct kobject, entry)
1204 
1205 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1206 {
1207 	struct list_head *el, *tmp;
1208 	struct kset *hw_id_kset;
1209 
1210 	hw_id_kset = &ip_hw_id->hw_id_kset;
1211 	spin_lock(&hw_id_kset->list_lock);
1212 	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1213 		list_del_init(el);
1214 		spin_unlock(&hw_id_kset->list_lock);
1215 		/* kobject is embedded in ip_hw_instance */
1216 		kobject_put(list_to_kobj(el));
1217 		spin_lock(&hw_id_kset->list_lock);
1218 	}
1219 	spin_unlock(&hw_id_kset->list_lock);
1220 	kobject_put(&ip_hw_id->hw_id_kset.kobj);
1221 }
1222 
1223 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1224 {
1225 	struct list_head *el, *tmp;
1226 	struct kset *ip_kset;
1227 
1228 	ip_kset = &ip_die_entry->ip_kset;
1229 	spin_lock(&ip_kset->list_lock);
1230 	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1231 		list_del_init(el);
1232 		spin_unlock(&ip_kset->list_lock);
1233 		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1234 		spin_lock(&ip_kset->list_lock);
1235 	}
1236 	spin_unlock(&ip_kset->list_lock);
1237 	kobject_put(&ip_die_entry->ip_kset.kobj);
1238 }
1239 
1240 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1241 {
1242 	struct list_head *el, *tmp;
1243 	struct kset *die_kset;
1244 
1245 	die_kset = &adev->ip_top->die_kset;
1246 	spin_lock(&die_kset->list_lock);
1247 	list_for_each_prev_safe(el, tmp, &die_kset->list) {
1248 		list_del_init(el);
1249 		spin_unlock(&die_kset->list_lock);
1250 		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1251 		spin_lock(&die_kset->list_lock);
1252 	}
1253 	spin_unlock(&die_kset->list_lock);
1254 	kobject_put(&adev->ip_top->die_kset.kobj);
1255 	kobject_put(&adev->ip_top->kobj);
1256 }
1257 
1258 /* ================================================== */
1259 
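/* Walk every die and IP instance in the discovery table: record the
 * register base offsets in adev->reg_offset[][], the IP versions in
 * adev->ip_versions[][], and count the VCN/SDMA/VPE/UMC/GC instances
 * found along the way.
 */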
1260 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1261 {
1262 	uint8_t num_base_address, subrev, variant;
1263 	struct binary_header *bhdr;
1264 	struct ip_discovery_header *ihdr;
1265 	struct die_header *dhdr;
1266 	struct ip_v4 *ip;
1267 	uint16_t die_offset;
1268 	uint16_t ip_offset;
1269 	uint16_t num_dies;
1270 	uint16_t num_ips;
1271 	int hw_ip;
1272 	int i, j, k;
1273 	int r;
1274 
1275 	r = amdgpu_discovery_init(adev);
1276 	if (r) {
1277 		DRM_ERROR("amdgpu_discovery_init failed\n");
1278 		return r;
1279 	}
1280 
1281 	adev->gfx.xcc_mask = 0;
1282 	adev->sdma.sdma_mask = 0;
1283 	adev->vcn.inst_mask = 0;
1284 	adev->jpeg.inst_mask = 0;
1285 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1286 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1287 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1288 	num_dies = le16_to_cpu(ihdr->num_dies);
1289 
1290 	DRM_DEBUG("number of dies: %d\n", num_dies);
1291 
1292 	for (i = 0; i < num_dies; i++) {
1293 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1294 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1295 		num_ips = le16_to_cpu(dhdr->num_ips);
1296 		ip_offset = die_offset + sizeof(*dhdr);
1297 
1298 		if (le16_to_cpu(dhdr->die_id) != i) {
1299 			DRM_ERROR("invalid die id %d, expected %d\n",
1300 					le16_to_cpu(dhdr->die_id), i);
1301 			return -EINVAL;
1302 		}
1303 
1304 		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1305 				le16_to_cpu(dhdr->die_id), num_ips);
1306 
1307 		for (j = 0; j < num_ips; j++) {
1308 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1309 
1310 			if (amdgpu_discovery_validate_ip(ip))
1311 				goto next_ip;
1312 
1313 			num_base_address = ip->num_base_address;
1314 
1315 			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1316 				  hw_id_names[le16_to_cpu(ip->hw_id)],
1317 				  le16_to_cpu(ip->hw_id),
1318 				  ip->instance_number,
1319 				  ip->major, ip->minor,
1320 				  ip->revision);
1321 
1322 			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1323 				/* Bit [5:0]: original revision value
1324 				 * Bit [7:6]: en/decode capability:
1325 				 *     0b00 : VCN function normally
				 *     0b00 : VCN functions normally
1327 				 *     0b01 : decode is disabled
1328 				 */
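				/* For example, a raw revision of 0x82
				 * (0b10_000010) is original revision 2
				 * with encode disabled.
				 */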
1329 				if (adev->vcn.num_vcn_inst <
1330 				    AMDGPU_MAX_VCN_INSTANCES) {
1331 					adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1332 						ip->revision & 0xc0;
1333 					adev->vcn.num_vcn_inst++;
1334 					adev->vcn.inst_mask |=
1335 						(1U << ip->instance_number);
1336 					adev->jpeg.inst_mask |=
1337 						(1U << ip->instance_number);
1338 				} else {
1339 					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1340 						adev->vcn.num_vcn_inst + 1,
1341 						AMDGPU_MAX_VCN_INSTANCES);
1342 				}
1343 				ip->revision &= ~0xc0;
1344 			}
1345 			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1346 			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1347 			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1348 			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1349 				if (adev->sdma.num_instances <
1350 				    AMDGPU_MAX_SDMA_INSTANCES) {
1351 					adev->sdma.num_instances++;
1352 					adev->sdma.sdma_mask |=
1353 						(1U << ip->instance_number);
1354 				} else {
1355 					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1356 						adev->sdma.num_instances + 1,
1357 						AMDGPU_MAX_SDMA_INSTANCES);
1358 				}
1359 			}
1360 
1361 			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
1362 				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
1363 					adev->vpe.num_instances++;
1364 				else
1365 					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
1366 						adev->vpe.num_instances + 1,
1367 						AMDGPU_MAX_VPE_INSTANCES);
1368 			}
1369 
1370 			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1371 				adev->gmc.num_umc++;
1372 				adev->umc.node_inst_num++;
1373 			}
1374 
1375 			if (le16_to_cpu(ip->hw_id) == GC_HWID)
1376 				adev->gfx.xcc_mask |=
1377 					(1U << ip->instance_number);
1378 
1379 			for (k = 0; k < num_base_address; k++) {
1380 				/*
1381 				 * convert the endianness of base addresses in place,
1382 				 * so that we don't need to convert them when accessing adev->reg_offset.
1383 				 */
1384 				if (ihdr->base_addr_64_bit)
1385 					/* Truncate the 64bit base address from ip discovery
1386 					 * and only store lower 32bit ip base in reg_offset[].
1387 					 * Bits > 32 follows ASIC specific format, thus just
1388 					 * discard them and handle it within specific ASIC.
1389 					 * By this way reg_offset[] and related helpers can
1390 					 * stay unchanged.
1391 					 * The base address is in dwords, thus clear the
1392 					 * highest 2 bits to store.
1393 					 */
1394 					ip->base_address[k] =
1395 						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1396 				else
1397 					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1398 				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1399 			}
1400 
1401 			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1402 				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1403 				    hw_id_map[hw_ip] != 0) {
1404 					DRM_DEBUG("set register base offset for %s\n",
1405 							hw_id_names[le16_to_cpu(ip->hw_id)]);
1406 					adev->reg_offset[hw_ip][ip->instance_number] =
1407 						ip->base_address;
1408 					/* Instance support is somewhat inconsistent.
1409 					 * SDMA is a good example.  Sienna cichlid has 4 total
1410 					 * SDMA instances, each enumerated separately (HWIDs
1411 					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
1412 					 * but they are enumerated as multiple instances of the
1413 					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
1414 					 * example.  On most chips there are multiple instances
1415 					 * with the same HWID.
1416 					 */
1417 
1418 					if (ihdr->version < 3) {
1419 						subrev = 0;
1420 						variant = 0;
1421 					} else {
1422 						subrev = ip->sub_revision;
1423 						variant = ip->variant;
1424 					}
1425 
1426 					adev->ip_versions[hw_ip]
1427 							 [ip->instance_number] =
1428 						IP_VERSION_FULL(ip->major,
1429 								ip->minor,
1430 								ip->revision,
1431 								variant,
1432 								subrev);
1433 				}
1434 			}
1435 
1436 next_ip:
1437 			if (ihdr->base_addr_64_bit)
1438 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1439 			else
1440 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
1441 		}
1442 	}
1443 
1444 	return 0;
1445 }
1446 
1447 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1448 {
1449 	int vcn_harvest_count = 0;
1450 	int umc_harvest_count = 0;
1451 
1452 	/*
	 * The harvest table does not apply to Navi1x and legacy GPUs,
	 * so read the harvest bit in each IP data structure to set the
	 * harvest configuration.
1456 	 */
1457 	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1458 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
1459 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4)) {
1460 		if ((adev->pdev->device == 0x731E &&
1461 			(adev->pdev->revision == 0xC6 ||
1462 			 adev->pdev->revision == 0xC7)) ||
1463 			(adev->pdev->device == 0x7340 &&
1464 			 adev->pdev->revision == 0xC9) ||
1465 			(adev->pdev->device == 0x7360 &&
1466 			 adev->pdev->revision == 0xC7))
1467 			amdgpu_discovery_read_harvest_bit_per_ip(adev,
1468 				&vcn_harvest_count);
1469 	} else {
1470 		amdgpu_discovery_read_from_harvest_table(adev,
1471 							 &vcn_harvest_count,
1472 							 &umc_harvest_count);
1473 	}
1474 
1475 	amdgpu_discovery_harvest_config_quirk(adev);
1476 
1477 	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1478 		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1479 		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1480 	}
1481 
1482 	if (umc_harvest_count < adev->gmc.num_umc) {
1483 		adev->gmc.num_umc -= umc_harvest_count;
1484 	}
1485 }
1486 
1487 union gc_info {
1488 	struct gc_info_v1_0 v1;
1489 	struct gc_info_v1_1 v1_1;
1490 	struct gc_info_v1_2 v1_2;
1491 	struct gc_info_v2_0 v2;
1492 	struct gc_info_v2_1 v2_1;
1493 };
1494 
1495 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1496 {
1497 	struct binary_header *bhdr;
1498 	union gc_info *gc_info;
1499 	u16 offset;
1500 
1501 	if (!adev->mman.discovery_bin) {
1502 		DRM_ERROR("ip discovery uninitialized\n");
1503 		return -EINVAL;
1504 	}
1505 
1506 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1507 	offset = le16_to_cpu(bhdr->table_list[GC].offset);
1508 
1509 	if (!offset)
1510 		return 0;
1511 
1512 	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1513 
1514 	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1515 	case 1:
1516 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1517 		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1518 						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1519 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1520 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1521 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1522 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1523 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1524 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1525 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1526 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1527 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1528 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1529 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1530 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1531 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1532 			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1533 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1534 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1535 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1536 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1537 			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1538 		}
1539 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1540 			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1541 			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1542 			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1543 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1544 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1545 			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1546 			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1547 			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1548 		}
1549 		break;
1550 	case 2:
1551 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1552 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1553 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1554 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1555 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1556 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1557 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1558 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1559 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1560 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1561 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1562 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1563 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1564 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1565 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1566 			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1567 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1568 		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1569 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1570 			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1571 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1572 			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1573 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1574 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1575 			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1576 		}
1577 		break;
1578 	default:
1579 		dev_err(adev->dev,
1580 			"Unhandled GC info table %d.%d\n",
1581 			le16_to_cpu(gc_info->v1.header.version_major),
1582 			le16_to_cpu(gc_info->v1.header.version_minor));
1583 		return -EINVAL;
1584 	}
1585 	return 0;
1586 }
1587 
1588 union mall_info {
1589 	struct mall_info_v1_0 v1;
1590 	struct mall_info_v2_0 v2;
1591 };
1592 
1593 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1594 {
1595 	struct binary_header *bhdr;
1596 	union mall_info *mall_info;
1597 	u32 u, mall_size_per_umc, m_s_present, half_use;
1598 	u64 mall_size;
1599 	u16 offset;
1600 
1601 	if (!adev->mman.discovery_bin) {
1602 		DRM_ERROR("ip discovery uninitialized\n");
1603 		return -EINVAL;
1604 	}
1605 
1606 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1607 	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1608 
1609 	if (!offset)
1610 		return 0;
1611 
1612 	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1613 
1614 	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1615 	case 1:
1616 		mall_size = 0;
1617 		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1618 		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1619 		half_use = le32_to_cpu(mall_info->v1.m_half_use);
1620 		for (u = 0; u < adev->gmc.num_umc; u++) {
1621 			if (m_s_present & (1 << u))
1622 				mall_size += mall_size_per_umc * 2;
1623 			else if (half_use & (1 << u))
1624 				mall_size += mall_size_per_umc / 2;
1625 			else
1626 				mall_size += mall_size_per_umc;
1627 		}
1628 		adev->gmc.mall_size = mall_size;
1629 		adev->gmc.m_half_use = half_use;
1630 		break;
1631 	case 2:
1632 		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1633 		adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
1634 		break;
1635 	default:
1636 		dev_err(adev->dev,
1637 			"Unhandled MALL info table %d.%d\n",
1638 			le16_to_cpu(mall_info->v1.header.version_major),
1639 			le16_to_cpu(mall_info->v1.header.version_minor));
1640 		return -EINVAL;
1641 	}
1642 	return 0;
1643 }
1644 
1645 union vcn_info {
1646 	struct vcn_info_v1_0 v1;
1647 };
1648 
1649 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1650 {
1651 	struct binary_header *bhdr;
1652 	union vcn_info *vcn_info;
1653 	u16 offset;
1654 	int v;
1655 
1656 	if (!adev->mman.discovery_bin) {
1657 		DRM_ERROR("ip discovery uninitialized\n");
1658 		return -EINVAL;
1659 	}
1660 
1661 	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1662 	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1663 	 * but that may change in the future with new GPUs so keep this
1664 	 * check for defensive purposes.
1665 	 */
1666 	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1667 		dev_err(adev->dev, "invalid vcn instances\n");
1668 		return -EINVAL;
1669 	}
1670 
1671 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1672 	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1673 
1674 	if (!offset)
1675 		return 0;
1676 
1677 	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1678 
1679 	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1680 	case 1:
1681 		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1682 		 * so this won't overflow.
1683 		 */
1684 		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1685 			adev->vcn.vcn_codec_disable_mask[v] =
1686 				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1687 		}
1688 		break;
1689 	default:
1690 		dev_err(adev->dev,
1691 			"Unhandled VCN info table %d.%d\n",
1692 			le16_to_cpu(vcn_info->v1.header.version_major),
1693 			le16_to_cpu(vcn_info->v1.header.version_minor));
1694 		return -EINVAL;
1695 	}
1696 	return 0;
1697 }
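
/*
 * Sketch of how the fuse data captured above might be consumed
 * (vcn_codec_fused_off() is a hypothetical helper, not part of this
 * file): each set bit of vcn_codec_disable_mask is understood here as a
 * codec fused off on that VCN instance, so a capability query reduces
 * to a bit test:
 *
 *	static bool vcn_codec_fused_off(struct amdgpu_device *adev,
 *					int inst, u32 codec_bit)
 *	{
 *		return adev->vcn.vcn_codec_disable_mask[inst] & codec_bit;
 *	}
 *
 * The authoritative decode of the individual bits lives with the VCN
 * code that builds the video capability tables.
 */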
1698 
1699 union nps_info {
1700 	struct nps_info_v1_0 v1;
1701 };
1702 
1703 int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
1704 				  uint32_t *nps_type,
1705 				  struct amdgpu_gmc_memrange **ranges,
1706 				  int *range_cnt)
1707 {
1708 	struct amdgpu_gmc_memrange *mem_ranges;
1709 	struct binary_header *bhdr;
1710 	union nps_info *nps_info;
1711 	u16 offset;
1712 	int i;
1713 
1714 	if (!nps_type || !range_cnt || !ranges)
1715 		return -EINVAL;
1716 
1717 	if (!adev->mman.discovery_bin) {
1718 		dev_err(adev->dev,
1719 			"fetch mem range failed, ip discovery uninitialized\n");
1720 		return -EINVAL;
1721 	}
1722 
1723 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1724 	offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
1725 
1726 	if (!offset)
1727 		return -ENOENT;
1728 
1729 	/* If verification fails, return as if NPS table doesn't exist */
1730 	if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
1731 		return -ENOENT;
1732 
1733 	nps_info = (union nps_info *)(adev->mman.discovery_bin + offset);
1734 
1735 	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
1736 	case 1:
1737 		*nps_type = nps_info->v1.nps_type;
1738 		*range_cnt = nps_info->v1.count;
1739 		mem_ranges = kvcalloc(*range_cnt, sizeof(*mem_ranges), GFP_KERNEL);
1740 		if (!mem_ranges)
1741 			return -ENOMEM;
1742 		for (i = 0; i < *range_cnt; i++) {
1743 			mem_ranges[i].base_address =
1744 				nps_info->v1.instance_info[i].base_address;
1745 			mem_ranges[i].limit_address =
1746 				nps_info->v1.instance_info[i].limit_address;
1747 			mem_ranges[i].nid_mask = -1;
1748 			mem_ranges[i].flags = 0;
1749 		}
1750 		*ranges = mem_ranges;
1751 		break;
1752 	default:
1753 		dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
1754 			le16_to_cpu(nps_info->v1.header.version_major),
1755 			le16_to_cpu(nps_info->v1.header.version_minor));
1756 		return -EINVAL;
1757 	}
1758 
1759 	return 0;
1760 }
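
/*
 * Illustrative caller sketch (assumed usage, matching the contract
 * above): on success the range array is handed to the caller, which
 * owns it and must release it with kvfree():
 *
 *	struct amdgpu_gmc_memrange *ranges;
 *	uint32_t nps_type;
 *	int cnt, r;
 *
 *	r = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges, &cnt);
 *	if (!r) {
 *		// consume nps_type and ranges[0..cnt - 1]
 *		kvfree(ranges);
 *	}
 *
 * -ENOENT only means no (valid) NPS table is present and need not be
 * treated as a hard failure.
 */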
1761 
1762 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1763 {
1764 	/* Use the GC IP version to select the matching SOC common IP block. */
1765 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1766 	case IP_VERSION(9, 0, 1):
1767 	case IP_VERSION(9, 1, 0):
1768 	case IP_VERSION(9, 2, 1):
1769 	case IP_VERSION(9, 2, 2):
1770 	case IP_VERSION(9, 3, 0):
1771 	case IP_VERSION(9, 4, 0):
1772 	case IP_VERSION(9, 4, 1):
1773 	case IP_VERSION(9, 4, 2):
1774 	case IP_VERSION(9, 4, 3):
1775 	case IP_VERSION(9, 4, 4):
1776 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1777 		break;
1778 	case IP_VERSION(10, 1, 10):
1779 	case IP_VERSION(10, 1, 1):
1780 	case IP_VERSION(10, 1, 2):
1781 	case IP_VERSION(10, 1, 3):
1782 	case IP_VERSION(10, 1, 4):
1783 	case IP_VERSION(10, 3, 0):
1784 	case IP_VERSION(10, 3, 1):
1785 	case IP_VERSION(10, 3, 2):
1786 	case IP_VERSION(10, 3, 3):
1787 	case IP_VERSION(10, 3, 4):
1788 	case IP_VERSION(10, 3, 5):
1789 	case IP_VERSION(10, 3, 6):
1790 	case IP_VERSION(10, 3, 7):
1791 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1792 		break;
1793 	case IP_VERSION(11, 0, 0):
1794 	case IP_VERSION(11, 0, 1):
1795 	case IP_VERSION(11, 0, 2):
1796 	case IP_VERSION(11, 0, 3):
1797 	case IP_VERSION(11, 0, 4):
1798 	case IP_VERSION(11, 5, 0):
1799 	case IP_VERSION(11, 5, 1):
1800 		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1801 		break;
1802 	case IP_VERSION(12, 0, 0):
1803 	case IP_VERSION(12, 0, 1):
1804 		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
1805 		break;
1806 	default:
1807 		dev_err(adev->dev,
1808 			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1809 			amdgpu_ip_version(adev, GC_HWIP, 0));
1810 		return -EINVAL;
1811 	}
1812 	return 0;
1813 }
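
/*
 * The remaining set_*_ip_blocks() helpers follow the same pattern as
 * above: map the discovered IP version to the ip_block implementation
 * that drives it. Supporting a new revision is usually just one more
 * case label, e.g. (sketch only; IP_VERSION(12, 0, 2) is a made-up
 * revision here):
 *
 *	case IP_VERSION(12, 0, 2):
 *		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
 *		break;
 */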
1814 
1815 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1816 {
1817 	/* use GC or MMHUB IP version */
1818 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1819 	case IP_VERSION(9, 0, 1):
1820 	case IP_VERSION(9, 1, 0):
1821 	case IP_VERSION(9, 2, 1):
1822 	case IP_VERSION(9, 2, 2):
1823 	case IP_VERSION(9, 3, 0):
1824 	case IP_VERSION(9, 4, 0):
1825 	case IP_VERSION(9, 4, 1):
1826 	case IP_VERSION(9, 4, 2):
1827 	case IP_VERSION(9, 4, 3):
1828 	case IP_VERSION(9, 4, 4):
1829 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1830 		break;
1831 	case IP_VERSION(10, 1, 10):
1832 	case IP_VERSION(10, 1, 1):
1833 	case IP_VERSION(10, 1, 2):
1834 	case IP_VERSION(10, 1, 3):
1835 	case IP_VERSION(10, 1, 4):
1836 	case IP_VERSION(10, 3, 0):
1837 	case IP_VERSION(10, 3, 1):
1838 	case IP_VERSION(10, 3, 2):
1839 	case IP_VERSION(10, 3, 3):
1840 	case IP_VERSION(10, 3, 4):
1841 	case IP_VERSION(10, 3, 5):
1842 	case IP_VERSION(10, 3, 6):
1843 	case IP_VERSION(10, 3, 7):
1844 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1845 		break;
1846 	case IP_VERSION(11, 0, 0):
1847 	case IP_VERSION(11, 0, 1):
1848 	case IP_VERSION(11, 0, 2):
1849 	case IP_VERSION(11, 0, 3):
1850 	case IP_VERSION(11, 0, 4):
1851 	case IP_VERSION(11, 5, 0):
1852 	case IP_VERSION(11, 5, 1):
1853 		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1854 		break;
1855 	case IP_VERSION(12, 0, 0):
1856 	case IP_VERSION(12, 0, 1):
1857 		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
1858 		break;
1859 	default:
1860 		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1861 			amdgpu_ip_version(adev, GC_HWIP, 0));
1862 		return -EINVAL;
1863 	}
1864 	return 0;
1865 }
1866 
1867 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1868 {
1869 	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
1870 	case IP_VERSION(4, 0, 0):
1871 	case IP_VERSION(4, 0, 1):
1872 	case IP_VERSION(4, 1, 0):
1873 	case IP_VERSION(4, 1, 1):
1874 	case IP_VERSION(4, 3, 0):
1875 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1876 		break;
1877 	case IP_VERSION(4, 2, 0):
1878 	case IP_VERSION(4, 2, 1):
1879 	case IP_VERSION(4, 4, 0):
1880 	case IP_VERSION(4, 4, 2):
1881 	case IP_VERSION(4, 4, 5):
1882 		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1883 		break;
1884 	case IP_VERSION(5, 0, 0):
1885 	case IP_VERSION(5, 0, 1):
1886 	case IP_VERSION(5, 0, 2):
1887 	case IP_VERSION(5, 0, 3):
1888 	case IP_VERSION(5, 2, 0):
1889 	case IP_VERSION(5, 2, 1):
1890 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1891 		break;
1892 	case IP_VERSION(6, 0, 0):
1893 	case IP_VERSION(6, 0, 1):
1894 	case IP_VERSION(6, 0, 2):
1895 		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1896 		break;
1897 	case IP_VERSION(6, 1, 0):
1898 		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1899 		break;
1900 	case IP_VERSION(7, 0, 0):
1901 		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
1902 		break;
1903 	default:
1904 		dev_err(adev->dev,
1905 			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1906 			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
1907 		return -EINVAL;
1908 	}
1909 	return 0;
1910 }
1911 
1912 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1913 {
1914 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1915 	case IP_VERSION(9, 0, 0):
1916 		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1917 		break;
1918 	case IP_VERSION(10, 0, 0):
1919 	case IP_VERSION(10, 0, 1):
1920 		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1921 		break;
1922 	case IP_VERSION(11, 0, 0):
1923 	case IP_VERSION(11, 0, 2):
1924 	case IP_VERSION(11, 0, 4):
1925 	case IP_VERSION(11, 0, 5):
1926 	case IP_VERSION(11, 0, 9):
1927 	case IP_VERSION(11, 0, 7):
1928 	case IP_VERSION(11, 0, 11):
1929 	case IP_VERSION(11, 0, 12):
1930 	case IP_VERSION(11, 0, 13):
1931 	case IP_VERSION(11, 5, 0):
1932 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1933 		break;
1934 	case IP_VERSION(11, 0, 8):
1935 		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
1936 		break;
1937 	case IP_VERSION(11, 0, 3):
1938 	case IP_VERSION(12, 0, 1):
1939 		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
1940 		break;
1941 	case IP_VERSION(13, 0, 0):
1942 	case IP_VERSION(13, 0, 1):
1943 	case IP_VERSION(13, 0, 2):
1944 	case IP_VERSION(13, 0, 3):
1945 	case IP_VERSION(13, 0, 5):
1946 	case IP_VERSION(13, 0, 6):
1947 	case IP_VERSION(13, 0, 7):
1948 	case IP_VERSION(13, 0, 8):
1949 	case IP_VERSION(13, 0, 10):
1950 	case IP_VERSION(13, 0, 11):
1951 	case IP_VERSION(13, 0, 14):
1952 	case IP_VERSION(14, 0, 0):
1953 	case IP_VERSION(14, 0, 1):
1954 		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
1955 		break;
1956 	case IP_VERSION(13, 0, 4):
1957 		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
1958 		break;
1959 	case IP_VERSION(14, 0, 2):
1960 	case IP_VERSION(14, 0, 3):
1961 		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
1962 		break;
1963 	default:
1964 		dev_err(adev->dev,
1965 			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
1966 			amdgpu_ip_version(adev, MP0_HWIP, 0));
1967 		return -EINVAL;
1968 	}
1969 	return 0;
1970 }
1971 
1972 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
1973 {
1974 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1975 	case IP_VERSION(9, 0, 0):
1976 	case IP_VERSION(10, 0, 0):
1977 	case IP_VERSION(10, 0, 1):
1978 	case IP_VERSION(11, 0, 2):
1979 		if (adev->asic_type == CHIP_ARCTURUS)
1980 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1981 		else
1982 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1983 		break;
1984 	case IP_VERSION(11, 0, 0):
1985 	case IP_VERSION(11, 0, 5):
1986 	case IP_VERSION(11, 0, 9):
1987 	case IP_VERSION(11, 0, 7):
1988 	case IP_VERSION(11, 0, 8):
1989 	case IP_VERSION(11, 0, 11):
1990 	case IP_VERSION(11, 0, 12):
1991 	case IP_VERSION(11, 0, 13):
1992 	case IP_VERSION(11, 5, 0):
1993 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1994 		break;
1995 	case IP_VERSION(12, 0, 0):
1996 	case IP_VERSION(12, 0, 1):
1997 		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
1998 		break;
1999 	case IP_VERSION(13, 0, 0):
2000 	case IP_VERSION(13, 0, 1):
2001 	case IP_VERSION(13, 0, 2):
2002 	case IP_VERSION(13, 0, 3):
2003 	case IP_VERSION(13, 0, 4):
2004 	case IP_VERSION(13, 0, 5):
2005 	case IP_VERSION(13, 0, 6):
2006 	case IP_VERSION(13, 0, 7):
2007 	case IP_VERSION(13, 0, 8):
2008 	case IP_VERSION(13, 0, 10):
2009 	case IP_VERSION(13, 0, 11):
2010 	case IP_VERSION(13, 0, 14):
2011 		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
2012 		break;
2013 	case IP_VERSION(14, 0, 0):
2014 	case IP_VERSION(14, 0, 1):
2015 	case IP_VERSION(14, 0, 2):
2016 	case IP_VERSION(14, 0, 3):
2017 		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
2018 		break;
2019 	default:
2020 		dev_err(adev->dev,
2021 			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
2022 			amdgpu_ip_version(adev, MP1_HWIP, 0));
2023 		return -EINVAL;
2024 	}
2025 	return 0;
2026 }
2027 
2028 #if defined(CONFIG_DRM_AMD_DC)
2029 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
2030 {
2031 	amdgpu_device_set_sriov_virtual_display(adev);
2032 	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2033 }
2034 #endif
2035 
2036 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
2037 {
2038 	if (adev->enable_virtual_display) {
2039 		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2040 		return 0;
2041 	}
2042 
2043 	if (!amdgpu_device_has_dc_support(adev))
2044 		return 0;
2045 
2046 #if defined(CONFIG_DRM_AMD_DC)
2047 	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2048 		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2049 		case IP_VERSION(1, 0, 0):
2050 		case IP_VERSION(1, 0, 1):
2051 		case IP_VERSION(2, 0, 2):
2052 		case IP_VERSION(2, 0, 0):
2053 		case IP_VERSION(2, 0, 3):
2054 		case IP_VERSION(2, 1, 0):
2055 		case IP_VERSION(3, 0, 0):
2056 		case IP_VERSION(3, 0, 2):
2057 		case IP_VERSION(3, 0, 3):
2058 		case IP_VERSION(3, 0, 1):
2059 		case IP_VERSION(3, 1, 2):
2060 		case IP_VERSION(3, 1, 3):
2061 		case IP_VERSION(3, 1, 4):
2062 		case IP_VERSION(3, 1, 5):
2063 		case IP_VERSION(3, 1, 6):
2064 		case IP_VERSION(3, 2, 0):
2065 		case IP_VERSION(3, 2, 1):
2066 		case IP_VERSION(3, 5, 0):
2067 		case IP_VERSION(3, 5, 1):
2068 		case IP_VERSION(4, 1, 0):
2069 			/* TODO: Fix IP version. DC code expects version 4.0.1 */
2070 			if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
2071 				adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);
2072 
2073 			if (amdgpu_sriov_vf(adev))
2074 				amdgpu_discovery_set_sriov_display(adev);
2075 			else
2076 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2077 			break;
2078 		default:
2079 			dev_err(adev->dev,
2080 				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
2081 				amdgpu_ip_version(adev, DCE_HWIP, 0));
2082 			return -EINVAL;
2083 		}
2084 	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2085 		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2086 		case IP_VERSION(12, 0, 0):
2087 		case IP_VERSION(12, 0, 1):
2088 		case IP_VERSION(12, 1, 0):
2089 			if (amdgpu_sriov_vf(adev))
2090 				amdgpu_discovery_set_sriov_display(adev);
2091 			else
2092 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2093 			break;
2094 		default:
2095 			dev_err(adev->dev,
2096 				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
2097 				amdgpu_ip_version(adev, DCI_HWIP, 0));
2098 			return -EINVAL;
2099 		}
2100 	}
2101 #endif
2102 	return 0;
2103 }
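
/*
 * Note on the virtual-display path above: adev->enable_virtual_display
 * is normally populated much earlier from the amdgpu.virtual_display
 * module parameter (e.g. amdgpu.virtual_display=0000:04:00.0,1 to
 * emulate one CRTC on that PCI function), in which case the vkms block
 * stands in for the real display manager block.
 */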
2104 
2105 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
2106 {
2107 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2108 	case IP_VERSION(9, 0, 1):
2109 	case IP_VERSION(9, 1, 0):
2110 	case IP_VERSION(9, 2, 1):
2111 	case IP_VERSION(9, 2, 2):
2112 	case IP_VERSION(9, 3, 0):
2113 	case IP_VERSION(9, 4, 0):
2114 	case IP_VERSION(9, 4, 1):
2115 	case IP_VERSION(9, 4, 2):
2116 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
2117 		break;
2118 	case IP_VERSION(9, 4, 3):
2119 	case IP_VERSION(9, 4, 4):
2120 		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
2121 		break;
2122 	case IP_VERSION(10, 1, 10):
2123 	case IP_VERSION(10, 1, 2):
2124 	case IP_VERSION(10, 1, 1):
2125 	case IP_VERSION(10, 1, 3):
2126 	case IP_VERSION(10, 1, 4):
2127 	case IP_VERSION(10, 3, 0):
2128 	case IP_VERSION(10, 3, 2):
2129 	case IP_VERSION(10, 3, 1):
2130 	case IP_VERSION(10, 3, 4):
2131 	case IP_VERSION(10, 3, 5):
2132 	case IP_VERSION(10, 3, 6):
2133 	case IP_VERSION(10, 3, 3):
2134 	case IP_VERSION(10, 3, 7):
2135 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
2136 		break;
2137 	case IP_VERSION(11, 0, 0):
2138 	case IP_VERSION(11, 0, 1):
2139 	case IP_VERSION(11, 0, 2):
2140 	case IP_VERSION(11, 0, 3):
2141 	case IP_VERSION(11, 0, 4):
2142 	case IP_VERSION(11, 5, 0):
2143 	case IP_VERSION(11, 5, 1):
2144 		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
2145 		break;
2146 	case IP_VERSION(12, 0, 0):
2147 	case IP_VERSION(12, 0, 1):
2148 		if (!amdgpu_exp_hw_support)
2149 			return -EINVAL;
2150 		amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
2151 		break;
2152 	default:
2153 		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
2154 			amdgpu_ip_version(adev, GC_HWIP, 0));
2155 		return -EINVAL;
2156 	}
2157 	return 0;
2158 }
2159 
2160 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
2161 {
2162 	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
2163 	case IP_VERSION(4, 0, 0):
2164 	case IP_VERSION(4, 0, 1):
2165 	case IP_VERSION(4, 1, 0):
2166 	case IP_VERSION(4, 1, 1):
2167 	case IP_VERSION(4, 1, 2):
2168 	case IP_VERSION(4, 2, 0):
2169 	case IP_VERSION(4, 2, 2):
2170 	case IP_VERSION(4, 4, 0):
2171 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
2172 		break;
2173 	case IP_VERSION(4, 4, 2):
2174 	case IP_VERSION(4, 4, 5):
2175 		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
2176 		break;
2177 	case IP_VERSION(5, 0, 0):
2178 	case IP_VERSION(5, 0, 1):
2179 	case IP_VERSION(5, 0, 2):
2180 	case IP_VERSION(5, 0, 5):
2181 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2182 		break;
2183 	case IP_VERSION(5, 2, 0):
2184 	case IP_VERSION(5, 2, 2):
2185 	case IP_VERSION(5, 2, 4):
2186 	case IP_VERSION(5, 2, 5):
2187 	case IP_VERSION(5, 2, 6):
2188 	case IP_VERSION(5, 2, 3):
2189 	case IP_VERSION(5, 2, 1):
2190 	case IP_VERSION(5, 2, 7):
2191 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2192 		break;
2193 	case IP_VERSION(6, 0, 0):
2194 	case IP_VERSION(6, 0, 1):
2195 	case IP_VERSION(6, 0, 2):
2196 	case IP_VERSION(6, 0, 3):
2197 	case IP_VERSION(6, 1, 0):
2198 	case IP_VERSION(6, 1, 1):
2199 		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2200 		break;
2201 	case IP_VERSION(7, 0, 0):
2202 	case IP_VERSION(7, 0, 1):
2203 		amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
2204 		break;
2205 	default:
2206 		dev_err(adev->dev,
2207 			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2208 			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2209 		return -EINVAL;
2210 	}
2211 	return 0;
2212 }
2213 
2214 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2215 {
2216 	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2217 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2218 		case IP_VERSION(7, 0, 0):
2219 		case IP_VERSION(7, 2, 0):
2220 			/* UVD is not supported on vega20 SR-IOV */
2221 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2222 				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2223 			break;
2224 		default:
2225 			dev_err(adev->dev,
2226 				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2227 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2228 			return -EINVAL;
2229 		}
2230 		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2231 		case IP_VERSION(4, 0, 0):
2232 		case IP_VERSION(4, 1, 0):
2233 			/* VCE is not supported on vega20 SR-IOV */
2234 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2235 				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2236 			break;
2237 		default:
2238 			dev_err(adev->dev,
2239 				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2240 				amdgpu_ip_version(adev, VCE_HWIP, 0));
2241 			return -EINVAL;
2242 		}
2243 	} else {
2244 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2245 		case IP_VERSION(1, 0, 0):
2246 		case IP_VERSION(1, 0, 1):
2247 			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2248 			break;
2249 		case IP_VERSION(2, 0, 0):
2250 		case IP_VERSION(2, 0, 2):
2251 		case IP_VERSION(2, 2, 0):
2252 			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2253 			if (!amdgpu_sriov_vf(adev))
2254 				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2255 			break;
2256 		case IP_VERSION(2, 0, 3):
2257 			break;
2258 		case IP_VERSION(2, 5, 0):
2259 			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2260 			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2261 			break;
2262 		case IP_VERSION(2, 6, 0):
2263 			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2264 			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2265 			break;
2266 		case IP_VERSION(3, 0, 0):
2267 		case IP_VERSION(3, 0, 16):
2268 		case IP_VERSION(3, 1, 1):
2269 		case IP_VERSION(3, 1, 2):
2270 		case IP_VERSION(3, 0, 2):
2271 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2272 			if (!amdgpu_sriov_vf(adev))
2273 				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2274 			break;
2275 		case IP_VERSION(3, 0, 33):
2276 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2277 			break;
2278 		case IP_VERSION(4, 0, 0):
2279 		case IP_VERSION(4, 0, 2):
2280 		case IP_VERSION(4, 0, 4):
2281 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2282 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2283 			break;
2284 		case IP_VERSION(4, 0, 3):
2285 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2286 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2287 			break;
2288 		case IP_VERSION(4, 0, 5):
2289 		case IP_VERSION(4, 0, 6):
2290 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2291 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2292 			break;
2293 		case IP_VERSION(5, 0, 0):
2294 			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2295 			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
2296 			if (amdgpu_jpeg_test)
2297 				adev->enable_jpeg_test = true;
2298 			break;
2299 		default:
2300 			dev_err(adev->dev,
2301 				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2302 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2303 			return -EINVAL;
2304 		}
2305 	}
2306 	return 0;
2307 }
2308 
2309 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2310 {
2311 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2312 	case IP_VERSION(10, 1, 10):
2313 	case IP_VERSION(10, 1, 1):
2314 	case IP_VERSION(10, 1, 2):
2315 	case IP_VERSION(10, 1, 3):
2316 	case IP_VERSION(10, 1, 4):
2317 	case IP_VERSION(10, 3, 0):
2318 	case IP_VERSION(10, 3, 1):
2319 	case IP_VERSION(10, 3, 2):
2320 	case IP_VERSION(10, 3, 3):
2321 	case IP_VERSION(10, 3, 4):
2322 	case IP_VERSION(10, 3, 5):
2323 	case IP_VERSION(10, 3, 6):
2324 		if (amdgpu_mes) {
2325 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
2326 			adev->enable_mes = true;
2327 			if (amdgpu_mes_kiq)
2328 				adev->enable_mes_kiq = true;
2329 		}
2330 		break;
2331 	case IP_VERSION(11, 0, 0):
2332 	case IP_VERSION(11, 0, 1):
2333 	case IP_VERSION(11, 0, 2):
2334 	case IP_VERSION(11, 0, 3):
2335 	case IP_VERSION(11, 0, 4):
2336 	case IP_VERSION(11, 5, 0):
2337 	case IP_VERSION(11, 5, 1):
2338 		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2339 		adev->enable_mes = true;
2340 		adev->enable_mes_kiq = true;
2341 		break;
2342 	case IP_VERSION(12, 0, 0):
2343 	case IP_VERSION(12, 0, 1):
2344 		amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
2345 		adev->enable_mes = true;
2346 		adev->enable_mes_kiq = true;
2347 		if (amdgpu_uni_mes)
2348 			adev->enable_uni_mes = true;
2349 		break;
2350 	default:
2351 		break;
2352 	}
2353 	return 0;
2354 }
2355 
2356 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2357 {
2358 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2359 	case IP_VERSION(9, 4, 3):
2360 	case IP_VERSION(9, 4, 4):
2361 		aqua_vanjaram_init_soc_config(adev);
2362 		break;
2363 	default:
2364 		break;
2365 	}
2366 }
2367 
2368 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2369 {
2370 	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2371 	case IP_VERSION(6, 1, 0):
2372 	case IP_VERSION(6, 1, 1):
2373 		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2374 		break;
2375 	default:
2376 		break;
2377 	}
2378 
2379 	return 0;
2380 }
2381 
2382 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2383 {
2384 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2385 	case IP_VERSION(4, 0, 5):
2386 	case IP_VERSION(4, 0, 6):
2387 		if (amdgpu_umsch_mm & 0x1) {
2388 			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2389 			adev->enable_umsch_mm = true;
2390 		}
2391 		break;
2392 	default:
2393 		break;
2394 	}
2395 
2396 	return 0;
2397 }
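
/*
 * amdgpu_umsch_mm is treated as a bitmask above: bit 0 alone decides
 * whether the user-mode scheduler block is added (the value is
 * presumably wired to the amdgpu.umsch_mm module parameter); any higher
 * bits, if used at all, are not interpreted here.
 */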
2398 
2399 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2400 {
2401 	int r;
2402 
2403 	switch (adev->asic_type) {
2404 	case CHIP_VEGA10:
2405 		vega10_reg_base_init(adev);
2406 		adev->sdma.num_instances = 2;
2407 		adev->gmc.num_umc = 4;
2408 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2409 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2410 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2411 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2412 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2413 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2414 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2415 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2416 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2417 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2418 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2419 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2420 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2421 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2422 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2423 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2424 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2425 		break;
2426 	case CHIP_VEGA12:
2427 		vega10_reg_base_init(adev);
2428 		adev->sdma.num_instances = 2;
2429 		adev->gmc.num_umc = 4;
2430 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2431 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2432 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2433 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2434 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2435 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2436 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2437 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2438 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2439 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2440 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2441 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2442 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2443 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2444 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2445 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2446 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2447 		break;
2448 	case CHIP_RAVEN:
2449 		vega10_reg_base_init(adev);
2450 		adev->sdma.num_instances = 1;
2451 		adev->vcn.num_vcn_inst = 1;
2452 		adev->gmc.num_umc = 2;
2453 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2454 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2455 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2456 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2457 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2458 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2459 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2460 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2461 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2462 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2463 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2464 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2465 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2466 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2467 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2468 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2469 		} else {
2470 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2471 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2472 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2473 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2474 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2475 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2476 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2477 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2478 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2479 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2480 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2481 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2482 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2483 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2484 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2485 		}
2486 		break;
2487 	case CHIP_VEGA20:
2488 		vega20_reg_base_init(adev);
2489 		adev->sdma.num_instances = 2;
2490 		adev->gmc.num_umc = 8;
2491 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2492 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2493 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2494 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2495 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2496 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2497 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2498 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2499 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2500 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2501 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2502 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2503 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2504 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2505 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2506 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2507 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2508 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2509 		break;
2510 	case CHIP_ARCTURUS:
2511 		arct_reg_base_init(adev);
2512 		adev->sdma.num_instances = 8;
2513 		adev->vcn.num_vcn_inst = 2;
2514 		adev->gmc.num_umc = 8;
2515 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2516 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2517 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2518 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2519 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2520 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2521 		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2522 		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2523 		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2524 		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2525 		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2526 		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2527 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2528 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2529 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2530 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2531 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2532 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2533 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2534 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2535 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2536 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2537 		break;
2538 	case CHIP_ALDEBARAN:
2539 		aldebaran_reg_base_init(adev);
2540 		adev->sdma.num_instances = 5;
2541 		adev->vcn.num_vcn_inst = 2;
2542 		adev->gmc.num_umc = 4;
2543 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2544 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2545 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2546 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2547 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2548 		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2549 		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2550 		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2551 		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2552 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2553 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2554 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2555 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2556 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2557 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2558 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2559 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2560 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2561 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2562 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2563 		break;
2564 	default:
2565 		r = amdgpu_discovery_reg_base_init(adev);
2566 		if (r)
2567 			return -EINVAL;
2568 
2569 		amdgpu_discovery_harvest_ip(adev);
2570 		amdgpu_discovery_get_gfx_info(adev);
2571 		amdgpu_discovery_get_mall_info(adev);
2572 		amdgpu_discovery_get_vcn_info(adev);
2573 		break;
2574 	}
2575 
2576 	amdgpu_discovery_init_soc_config(adev);
2577 	amdgpu_discovery_sysfs_init(adev);
2578 
2579 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2580 	case IP_VERSION(9, 0, 1):
2581 	case IP_VERSION(9, 2, 1):
2582 	case IP_VERSION(9, 4, 0):
2583 	case IP_VERSION(9, 4, 1):
2584 	case IP_VERSION(9, 4, 2):
2585 	case IP_VERSION(9, 4, 3):
2586 	case IP_VERSION(9, 4, 4):
2587 		adev->family = AMDGPU_FAMILY_AI;
2588 		break;
2589 	case IP_VERSION(9, 1, 0):
2590 	case IP_VERSION(9, 2, 2):
2591 	case IP_VERSION(9, 3, 0):
2592 		adev->family = AMDGPU_FAMILY_RV;
2593 		break;
2594 	case IP_VERSION(10, 1, 10):
2595 	case IP_VERSION(10, 1, 1):
2596 	case IP_VERSION(10, 1, 2):
2597 	case IP_VERSION(10, 1, 3):
2598 	case IP_VERSION(10, 1, 4):
2599 	case IP_VERSION(10, 3, 0):
2600 	case IP_VERSION(10, 3, 2):
2601 	case IP_VERSION(10, 3, 4):
2602 	case IP_VERSION(10, 3, 5):
2603 		adev->family = AMDGPU_FAMILY_NV;
2604 		break;
2605 	case IP_VERSION(10, 3, 1):
2606 		adev->family = AMDGPU_FAMILY_VGH;
2607 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2608 		break;
2609 	case IP_VERSION(10, 3, 3):
2610 		adev->family = AMDGPU_FAMILY_YC;
2611 		break;
2612 	case IP_VERSION(10, 3, 6):
2613 		adev->family = AMDGPU_FAMILY_GC_10_3_6;
2614 		break;
2615 	case IP_VERSION(10, 3, 7):
2616 		adev->family = AMDGPU_FAMILY_GC_10_3_7;
2617 		break;
2618 	case IP_VERSION(11, 0, 0):
2619 	case IP_VERSION(11, 0, 2):
2620 	case IP_VERSION(11, 0, 3):
2621 		adev->family = AMDGPU_FAMILY_GC_11_0_0;
2622 		break;
2623 	case IP_VERSION(11, 0, 1):
2624 	case IP_VERSION(11, 0, 4):
2625 		adev->family = AMDGPU_FAMILY_GC_11_0_1;
2626 		break;
2627 	case IP_VERSION(11, 5, 0):
2628 	case IP_VERSION(11, 5, 1):
2629 		adev->family = AMDGPU_FAMILY_GC_11_5_0;
2630 		break;
2631 	case IP_VERSION(12, 0, 0):
2632 	case IP_VERSION(12, 0, 1):
2633 		adev->family = AMDGPU_FAMILY_GC_12_0_0;
2634 		break;
2635 	default:
2636 		return -EINVAL;
2637 	}
2638 
2639 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2640 	case IP_VERSION(9, 1, 0):
2641 	case IP_VERSION(9, 2, 2):
2642 	case IP_VERSION(9, 3, 0):
2643 	case IP_VERSION(10, 1, 3):
2644 	case IP_VERSION(10, 1, 4):
2645 	case IP_VERSION(10, 3, 1):
2646 	case IP_VERSION(10, 3, 3):
2647 	case IP_VERSION(10, 3, 6):
2648 	case IP_VERSION(10, 3, 7):
2649 	case IP_VERSION(11, 0, 1):
2650 	case IP_VERSION(11, 0, 4):
2651 	case IP_VERSION(11, 5, 0):
2652 	case IP_VERSION(11, 5, 1):
2653 		adev->flags |= AMD_IS_APU;
2654 		break;
2655 	default:
2656 		break;
2657 	}
2658 
2659 	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
2660 		adev->gmc.xgmi.supported = true;
2661 
2662 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2663 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
2664 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
2665 
2666 	/* set NBIO version */
2667 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2668 	case IP_VERSION(6, 1, 0):
2669 	case IP_VERSION(6, 2, 0):
2670 		adev->nbio.funcs = &nbio_v6_1_funcs;
2671 		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2672 		break;
2673 	case IP_VERSION(7, 0, 0):
2674 	case IP_VERSION(7, 0, 1):
2675 	case IP_VERSION(2, 5, 0):
2676 		adev->nbio.funcs = &nbio_v7_0_funcs;
2677 		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2678 		break;
2679 	case IP_VERSION(7, 4, 0):
2680 	case IP_VERSION(7, 4, 1):
2681 	case IP_VERSION(7, 4, 4):
2682 		adev->nbio.funcs = &nbio_v7_4_funcs;
2683 		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2684 		break;
2685 	case IP_VERSION(7, 9, 0):
2686 		adev->nbio.funcs = &nbio_v7_9_funcs;
2687 		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2688 		break;
2689 	case IP_VERSION(7, 11, 0):
2690 	case IP_VERSION(7, 11, 1):
2691 		adev->nbio.funcs = &nbio_v7_11_funcs;
2692 		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2693 		break;
2694 	case IP_VERSION(7, 2, 0):
2695 	case IP_VERSION(7, 2, 1):
2696 	case IP_VERSION(7, 3, 0):
2697 	case IP_VERSION(7, 5, 0):
2698 	case IP_VERSION(7, 5, 1):
2699 		adev->nbio.funcs = &nbio_v7_2_funcs;
2700 		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2701 		break;
2702 	case IP_VERSION(2, 1, 1):
2703 	case IP_VERSION(2, 3, 0):
2704 	case IP_VERSION(2, 3, 1):
2705 	case IP_VERSION(2, 3, 2):
2706 	case IP_VERSION(3, 3, 0):
2707 	case IP_VERSION(3, 3, 1):
2708 	case IP_VERSION(3, 3, 2):
2709 	case IP_VERSION(3, 3, 3):
2710 		adev->nbio.funcs = &nbio_v2_3_funcs;
2711 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2712 		break;
2713 	case IP_VERSION(4, 3, 0):
2714 	case IP_VERSION(4, 3, 1):
2715 		if (amdgpu_sriov_vf(adev))
2716 			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2717 		else
2718 			adev->nbio.funcs = &nbio_v4_3_funcs;
2719 		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2720 		break;
2721 	case IP_VERSION(7, 7, 0):
2722 	case IP_VERSION(7, 7, 1):
2723 		adev->nbio.funcs = &nbio_v7_7_funcs;
2724 		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2725 		break;
2726 	case IP_VERSION(6, 3, 1):
2727 		adev->nbio.funcs = &nbif_v6_3_1_funcs;
2728 		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
2729 		break;
2730 	default:
2731 		break;
2732 	}
2733 
2734 	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2735 	case IP_VERSION(4, 0, 0):
2736 	case IP_VERSION(4, 0, 1):
2737 	case IP_VERSION(4, 1, 0):
2738 	case IP_VERSION(4, 1, 1):
2739 	case IP_VERSION(4, 1, 2):
2740 	case IP_VERSION(4, 2, 0):
2741 	case IP_VERSION(4, 2, 1):
2742 	case IP_VERSION(4, 4, 0):
2743 	case IP_VERSION(4, 4, 2):
2744 	case IP_VERSION(4, 4, 5):
2745 		adev->hdp.funcs = &hdp_v4_0_funcs;
2746 		break;
2747 	case IP_VERSION(5, 0, 0):
2748 	case IP_VERSION(5, 0, 1):
2749 	case IP_VERSION(5, 0, 2):
2750 	case IP_VERSION(5, 0, 3):
2751 	case IP_VERSION(5, 0, 4):
2752 	case IP_VERSION(5, 2, 0):
2753 		adev->hdp.funcs = &hdp_v5_0_funcs;
2754 		break;
2755 	case IP_VERSION(5, 2, 1):
2756 		adev->hdp.funcs = &hdp_v5_2_funcs;
2757 		break;
2758 	case IP_VERSION(6, 0, 0):
2759 	case IP_VERSION(6, 0, 1):
2760 	case IP_VERSION(6, 1, 0):
2761 		adev->hdp.funcs = &hdp_v6_0_funcs;
2762 		break;
2763 	case IP_VERSION(7, 0, 0):
2764 		adev->hdp.funcs = &hdp_v7_0_funcs;
2765 		break;
2766 	default:
2767 		break;
2768 	}
2769 
2770 	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
2771 	case IP_VERSION(3, 6, 0):
2772 	case IP_VERSION(3, 6, 1):
2773 	case IP_VERSION(3, 6, 2):
2774 		adev->df.funcs = &df_v3_6_funcs;
2775 		break;
2776 	case IP_VERSION(2, 1, 0):
2777 	case IP_VERSION(2, 1, 1):
2778 	case IP_VERSION(2, 5, 0):
2779 	case IP_VERSION(3, 5, 1):
2780 	case IP_VERSION(3, 5, 2):
2781 		adev->df.funcs = &df_v1_7_funcs;
2782 		break;
2783 	case IP_VERSION(4, 3, 0):
2784 		adev->df.funcs = &df_v4_3_funcs;
2785 		break;
2786 	case IP_VERSION(4, 6, 2):
2787 		adev->df.funcs = &df_v4_6_2_funcs;
2788 		break;
2789 	default:
2790 		break;
2791 	}
2792 
2793 	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
2794 	case IP_VERSION(9, 0, 0):
2795 	case IP_VERSION(9, 0, 1):
2796 	case IP_VERSION(10, 0, 0):
2797 	case IP_VERSION(10, 0, 1):
2798 	case IP_VERSION(10, 0, 2):
2799 		adev->smuio.funcs = &smuio_v9_0_funcs;
2800 		break;
2801 	case IP_VERSION(11, 0, 0):
2802 	case IP_VERSION(11, 0, 2):
2803 	case IP_VERSION(11, 0, 3):
2804 	case IP_VERSION(11, 0, 4):
2805 	case IP_VERSION(11, 0, 7):
2806 	case IP_VERSION(11, 0, 8):
2807 		adev->smuio.funcs = &smuio_v11_0_funcs;
2808 		break;
2809 	case IP_VERSION(11, 0, 6):
2810 	case IP_VERSION(11, 0, 10):
2811 	case IP_VERSION(11, 0, 11):
2812 	case IP_VERSION(11, 5, 0):
2813 	case IP_VERSION(13, 0, 1):
2814 	case IP_VERSION(13, 0, 9):
2815 	case IP_VERSION(13, 0, 10):
2816 		adev->smuio.funcs = &smuio_v11_0_6_funcs;
2817 		break;
2818 	case IP_VERSION(13, 0, 2):
2819 		adev->smuio.funcs = &smuio_v13_0_funcs;
2820 		break;
2821 	case IP_VERSION(13, 0, 3):
2822 		adev->smuio.funcs = &smuio_v13_0_3_funcs;
2823 		if (adev->smuio.funcs->get_pkg_type(adev) ==
2824 		    AMDGPU_PKG_TYPE_APU)
2825 			adev->flags |= AMD_IS_APU;
2826 		break;
2827 	case IP_VERSION(13, 0, 6):
2828 	case IP_VERSION(13, 0, 8):
2829 	case IP_VERSION(14, 0, 0):
2830 	case IP_VERSION(14, 0, 1):
2831 		adev->smuio.funcs = &smuio_v13_0_6_funcs;
2832 		break;
2833 	case IP_VERSION(14, 0, 2):
2834 		adev->smuio.funcs = &smuio_v14_0_2_funcs;
2835 		break;
2836 	default:
2837 		break;
2838 	}
2839 
2840 	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
2841 	case IP_VERSION(6, 0, 0):
2842 	case IP_VERSION(6, 0, 1):
2843 	case IP_VERSION(6, 0, 2):
2844 	case IP_VERSION(6, 0, 3):
2845 		adev->lsdma.funcs = &lsdma_v6_0_funcs;
2846 		break;
2847 	case IP_VERSION(7, 0, 0):
2848 	case IP_VERSION(7, 0, 1):
2849 		adev->lsdma.funcs = &lsdma_v7_0_funcs;
2850 		break;
2851 	default:
2852 		break;
2853 	}
2854 
2855 	r = amdgpu_discovery_set_common_ip_blocks(adev);
2856 	if (r)
2857 		return r;
2858 
2859 	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
2860 	if (r)
2861 		return r;
2862 
2863 	/* For SR-IOV, PSP needs to be initialized before IH */
2864 	if (amdgpu_sriov_vf(adev)) {
2865 		r = amdgpu_discovery_set_psp_ip_blocks(adev);
2866 		if (r)
2867 			return r;
2868 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2869 		if (r)
2870 			return r;
2871 	} else {
2872 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2873 		if (r)
2874 			return r;
2875 
2876 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2877 			r = amdgpu_discovery_set_psp_ip_blocks(adev);
2878 			if (r)
2879 				return r;
2880 		}
2881 	}
2882 
2883 	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2884 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2885 		if (r)
2886 			return r;
2887 	}
2888 
2889 	r = amdgpu_discovery_set_display_ip_blocks(adev);
2890 	if (r)
2891 		return r;
2892 
2893 	r = amdgpu_discovery_set_gc_ip_blocks(adev);
2894 	if (r)
2895 		return r;
2896 
2897 	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
2898 	if (r)
2899 		return r;
2900 
2901 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
2902 	     !amdgpu_sriov_vf(adev)) ||
2903 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
2904 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2905 		if (r)
2906 			return r;
2907 	}
2908 
2909 	r = amdgpu_discovery_set_mm_ip_blocks(adev);
2910 	if (r)
2911 		return r;
2912 
2913 	r = amdgpu_discovery_set_mes_ip_blocks(adev);
2914 	if (r)
2915 		return r;
2916 
2917 	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
2918 	if (r)
2919 		return r;
2920 
2921 	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
2922 	if (r)
2923 		return r;
2924 
2925 	return 0;
2926 }
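
/*
 * Expected call site (sketch): early device init runs this once per
 * device, before any IP block's sw_init, so the block list and the
 * per-IP function pointers chosen above are in place first:
 *
 *	r = amdgpu_discovery_set_ip_blocks(adev);
 *	if (r)
 *		return r;
 *
 * In the driver this is reached from amdgpu_device_ip_early_init().
 */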
2927 
2928