xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c (revision 6d9b262afe0ec1d6e0ef99321ca9d6b921310471)
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30 #include "amdgpu_ras.h"
31 
32 #include "soc15.h"
33 #include "gfx_v9_0.h"
34 #include "gfx_v9_4_3.h"
35 #include "gmc_v9_0.h"
36 #include "df_v1_7.h"
37 #include "df_v3_6.h"
38 #include "df_v4_3.h"
39 #include "df_v4_6_2.h"
40 #include "nbio_v6_1.h"
41 #include "nbio_v7_0.h"
42 #include "nbio_v7_4.h"
43 #include "nbio_v7_9.h"
44 #include "nbio_v7_11.h"
45 #include "hdp_v4_0.h"
46 #include "vega10_ih.h"
47 #include "vega20_ih.h"
48 #include "sdma_v4_0.h"
49 #include "sdma_v4_4_2.h"
50 #include "uvd_v7_0.h"
51 #include "vce_v4_0.h"
52 #include "vcn_v1_0.h"
53 #include "vcn_v2_5.h"
54 #include "jpeg_v2_5.h"
55 #include "smuio_v9_0.h"
56 #include "gmc_v10_0.h"
57 #include "gmc_v11_0.h"
58 #include "gfxhub_v2_0.h"
59 #include "mmhub_v2_0.h"
60 #include "nbio_v2_3.h"
61 #include "nbio_v4_3.h"
62 #include "nbio_v7_2.h"
63 #include "nbio_v7_7.h"
64 #include "nbif_v6_3_1.h"
65 #include "hdp_v5_0.h"
66 #include "hdp_v5_2.h"
67 #include "hdp_v6_0.h"
68 #include "hdp_v7_0.h"
69 #include "nv.h"
70 #include "soc21.h"
71 #include "navi10_ih.h"
72 #include "ih_v6_0.h"
73 #include "ih_v6_1.h"
74 #include "ih_v7_0.h"
75 #include "gfx_v10_0.h"
76 #include "gfx_v11_0.h"
77 #include "sdma_v5_0.h"
78 #include "sdma_v5_2.h"
79 #include "sdma_v6_0.h"
80 #include "lsdma_v6_0.h"
81 #include "lsdma_v7_0.h"
82 #include "vcn_v2_0.h"
83 #include "jpeg_v2_0.h"
84 #include "vcn_v3_0.h"
85 #include "jpeg_v3_0.h"
86 #include "vcn_v4_0.h"
87 #include "jpeg_v4_0.h"
88 #include "vcn_v4_0_3.h"
89 #include "jpeg_v4_0_3.h"
90 #include "vcn_v4_0_5.h"
91 #include "jpeg_v4_0_5.h"
92 #include "amdgpu_vkms.h"
93 #include "mes_v10_1.h"
94 #include "mes_v11_0.h"
95 #include "smuio_v11_0.h"
96 #include "smuio_v11_0_6.h"
97 #include "smuio_v13_0.h"
98 #include "smuio_v13_0_3.h"
99 #include "smuio_v13_0_6.h"
100 #include "vcn_v5_0_0.h"
101 #include "jpeg_v5_0_0.h"
102 
103 #include "amdgpu_vpe.h"
104 
105 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
106 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
107 
108 #define mmIP_DISCOVERY_VERSION  0x16A00
109 #define mmRCC_CONFIG_MEMSIZE	0xde3
110 #define mmMP0_SMN_C2PMSG_33	0x16061
111 #define mmMM_INDEX		0x0
112 #define mmMM_INDEX_HI		0x6
113 #define mmMM_DATA		0x1
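/*
 * mmMM_INDEX/mmMM_INDEX_HI/mmMM_DATA are the classic index/data
 * register pair for indirect VRAM access, one mechanism behind the
 * amdgpu_device_vram_access() call used below.  A rough,
 * non-authoritative sketch of the idiom (aperture/high bits elided):
 *
 *	WREG32(mmMM_INDEX, lower_32_bits(byte_offset));
 *	val = RREG32(mmMM_DATA);
 */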
114 
115 static const char *hw_id_names[HW_ID_MAX] = {
116 	[MP1_HWID]		= "MP1",
117 	[MP2_HWID]		= "MP2",
118 	[THM_HWID]		= "THM",
119 	[SMUIO_HWID]		= "SMUIO",
120 	[FUSE_HWID]		= "FUSE",
121 	[CLKA_HWID]		= "CLKA",
122 	[PWR_HWID]		= "PWR",
123 	[GC_HWID]		= "GC",
124 	[UVD_HWID]		= "UVD",
125 	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
126 	[ACP_HWID]		= "ACP",
127 	[DCI_HWID]		= "DCI",
128 	[DMU_HWID]		= "DMU",
129 	[DCO_HWID]		= "DCO",
130 	[DIO_HWID]		= "DIO",
131 	[XDMA_HWID]		= "XDMA",
132 	[DCEAZ_HWID]		= "DCEAZ",
133 	[DAZ_HWID]		= "DAZ",
134 	[SDPMUX_HWID]		= "SDPMUX",
135 	[NTB_HWID]		= "NTB",
136 	[IOHC_HWID]		= "IOHC",
137 	[L2IMU_HWID]		= "L2IMU",
138 	[VCE_HWID]		= "VCE",
139 	[MMHUB_HWID]		= "MMHUB",
140 	[ATHUB_HWID]		= "ATHUB",
141 	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
142 	[DFX_HWID]		= "DFX",
143 	[DBGU0_HWID]		= "DBGU0",
144 	[DBGU1_HWID]		= "DBGU1",
145 	[OSSSYS_HWID]		= "OSSSYS",
146 	[HDP_HWID]		= "HDP",
147 	[SDMA0_HWID]		= "SDMA0",
148 	[SDMA1_HWID]		= "SDMA1",
149 	[SDMA2_HWID]		= "SDMA2",
150 	[SDMA3_HWID]		= "SDMA3",
151 	[LSDMA_HWID]		= "LSDMA",
152 	[ISP_HWID]		= "ISP",
153 	[DBGU_IO_HWID]		= "DBGU_IO",
154 	[DF_HWID]		= "DF",
155 	[CLKB_HWID]		= "CLKB",
156 	[FCH_HWID]		= "FCH",
157 	[DFX_DAP_HWID]		= "DFX_DAP",
158 	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
159 	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
160 	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
161 	[L1IMU3_HWID]		= "L1IMU3",
162 	[L1IMU4_HWID]		= "L1IMU4",
163 	[L1IMU5_HWID]		= "L1IMU5",
164 	[L1IMU6_HWID]		= "L1IMU6",
165 	[L1IMU7_HWID]		= "L1IMU7",
166 	[L1IMU8_HWID]		= "L1IMU8",
167 	[L1IMU9_HWID]		= "L1IMU9",
168 	[L1IMU10_HWID]		= "L1IMU10",
169 	[L1IMU11_HWID]		= "L1IMU11",
170 	[L1IMU12_HWID]		= "L1IMU12",
171 	[L1IMU13_HWID]		= "L1IMU13",
172 	[L1IMU14_HWID]		= "L1IMU14",
173 	[L1IMU15_HWID]		= "L1IMU15",
174 	[WAFLC_HWID]		= "WAFLC",
175 	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
176 	[PCIE_HWID]		= "PCIE",
177 	[PCS_HWID]		= "PCS",
178 	[DDCL_HWID]		= "DDCL",
179 	[SST_HWID]		= "SST",
180 	[IOAGR_HWID]		= "IOAGR",
181 	[NBIF_HWID]		= "NBIF",
182 	[IOAPIC_HWID]		= "IOAPIC",
183 	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
184 	[NTBCCP_HWID]		= "NTBCCP",
185 	[UMC_HWID]		= "UMC",
186 	[SATA_HWID]		= "SATA",
187 	[USB_HWID]		= "USB",
188 	[CCXSEC_HWID]		= "CCXSEC",
189 	[XGMI_HWID]		= "XGMI",
190 	[XGBE_HWID]		= "XGBE",
191 	[MP0_HWID]		= "MP0",
192 	[VPE_HWID]		= "VPE",
193 };
194 
195 static int hw_id_map[MAX_HWIP] = {
196 	[GC_HWIP]	= GC_HWID,
197 	[HDP_HWIP]	= HDP_HWID,
198 	[SDMA0_HWIP]	= SDMA0_HWID,
199 	[SDMA1_HWIP]	= SDMA1_HWID,
200 	[SDMA2_HWIP]    = SDMA2_HWID,
201 	[SDMA3_HWIP]    = SDMA3_HWID,
202 	[LSDMA_HWIP]    = LSDMA_HWID,
203 	[MMHUB_HWIP]	= MMHUB_HWID,
204 	[ATHUB_HWIP]	= ATHUB_HWID,
205 	[NBIO_HWIP]	= NBIF_HWID,
206 	[MP0_HWIP]	= MP0_HWID,
207 	[MP1_HWIP]	= MP1_HWID,
208 	[UVD_HWIP]	= UVD_HWID,
209 	[VCE_HWIP]	= VCE_HWID,
210 	[DF_HWIP]	= DF_HWID,
211 	[DCE_HWIP]	= DMU_HWID,
212 	[OSSSYS_HWIP]	= OSSSYS_HWID,
213 	[SMUIO_HWIP]	= SMUIO_HWID,
214 	[PWR_HWIP]	= PWR_HWID,
215 	[NBIF_HWIP]	= NBIF_HWID,
216 	[THM_HWIP]	= THM_HWID,
217 	[CLK_HWIP]	= CLKA_HWID,
218 	[UMC_HWIP]	= UMC_HWID,
219 	[XGMI_HWIP]	= XGMI_HWID,
220 	[DCI_HWIP]	= DCI_HWID,
221 	[PCIE_HWIP]	= PCIE_HWID,
222 	[VPE_HWIP]	= VPE_HWID,
223 };
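/*
 * The two tables above translate between the driver's logical HWIP
 * indices and the HW_ID values carried in the discovery blob.  A
 * minimal lookup sketch (hypothetical snippet, not part of this file):
 *
 *	int hw_id = hw_id_map[GC_HWIP];		// == GC_HWID
 *	const char *name = hw_id_names[hw_id];	// == "GC"
 */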
224 
225 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
226 {
227 	u64 tmr_offset, tmr_size, pos;
228 	void *discv_regn;
229 	int ret;
230 
231 	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
232 	if (ret)
233 		return ret;
234 
235 	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
236 
237 	/* This region is read-only and reserved from system use */
238 	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
239 	if (discv_regn) {
240 		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
241 		memunmap(discv_regn);
242 		return 0;
243 	}
244 
245 	return -ENOENT;
246 }
247 
248 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
249 						 uint8_t *binary)
250 {
251 	uint64_t vram_size;
252 	u32 msg;
253 	int i, ret = 0;
254 
255 	/* It can take up to a second for IFWI init to complete on some dGPUs,
256 	 * but generally it should be in the 60-100ms range.  Normally this starts
257 	 * as soon as the device gets power so by the time the OS loads this has long
258 	 * completed.  However, when a card is hotplugged (e.g., via USB4), we need to
259 	 * wait for this to complete.  Once the C2PMSG is updated, we can
260 	 * continue.
261 	 */
262 	if (dev_is_removable(&adev->pdev->dev)) {
263 		for (i = 0; i < 1000; i++) {
264 			msg = RREG32(mmMP0_SMN_C2PMSG_33);
265 			if (msg & 0x80000000)
266 				break;
267 			msleep(1);
268 		}
269 	}
270 	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
271 
272 	if (vram_size) {
273 		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
274 		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
275 					  adev->mman.discovery_tmr_size, false);
276 	} else {
277 		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
278 	}
279 
280 	return ret;
281 }
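/*
 * Both paths above read the binary from the same place: it starts
 * DISCOVERY_TMR_OFFSET bytes before the end of the carve-out (top of
 * VRAM for dGPUs, end of the ACPI-reported TMR otherwise).  Rough
 * layout sketch, not to scale:
 *
 *	base ............................................. end
 *	|             | <---- discovery binary ----> |....|
 *	              ^
 *	              pos = end - DISCOVERY_TMR_OFFSET
 */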
282 
283 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
284 {
285 	const struct firmware *fw;
286 	const char *fw_name;
287 	int r;
288 
289 	switch (amdgpu_discovery) {
290 	case 2:
291 		fw_name = FIRMWARE_IP_DISCOVERY;
292 		break;
293 	default:
294 		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
295 		return -EINVAL;
296 	}
297 
298 	r = request_firmware(&fw, fw_name, adev->dev);
299 	if (r) {
300 		dev_err(adev->dev, "can't load firmware \"%s\"\n",
301 			fw_name);
302 		return r;
303 	}
304 
305 	memcpy(binary, fw->data, fw->size);
306 	release_firmware(fw);
307 
308 	return 0;
309 }
310 
311 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
312 {
313 	uint16_t checksum = 0;
314 	int i;
315 
316 	for (i = 0; i < size; i++)
317 		checksum += data[i];
318 
319 	return checksum;
320 }
321 
322 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
323 						    uint16_t expected)
324 {
325 	return amdgpu_discovery_calculate_checksum(data, size) == expected;
326 }
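/*
 * Usage sketch for the two helpers above, mirroring what
 * amdgpu_discovery_init() does for each table: byte-sum the payload
 * and compare against the checksum stored in the header (values here
 * are hypothetical):
 *
 *	uint8_t payload[] = { 0x01, 0x02, 0x03 };
 *
 *	amdgpu_discovery_verify_checksum(payload, sizeof(payload),
 *					 0x0006);	// true: 1+2+3 == 6
 */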
327 
328 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
329 {
330 	struct binary_header *bhdr;
331 	bhdr = (struct binary_header *)binary;
332 
333 	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
334 }
335 
336 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
337 {
338 	/*
339 	 * So far, apply this quirk only on those Navy Flounder boards
340 	 * whose harvest tables report a bad VCN config.
341 	 */
342 	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
343 	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
344 		switch (adev->pdev->revision) {
345 		case 0xC1:
346 		case 0xC2:
347 		case 0xC3:
348 		case 0xC5:
349 		case 0xC7:
350 		case 0xCF:
351 		case 0xDF:
352 			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
353 			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
354 			break;
355 		default:
356 			break;
357 		}
358 	}
359 }
360 
361 static int amdgpu_discovery_init(struct amdgpu_device *adev)
362 {
363 	struct table_info *info;
364 	struct binary_header *bhdr;
365 	uint16_t offset;
366 	uint16_t size;
367 	uint16_t checksum;
368 	int r;
369 
370 	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
371 	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
372 	if (!adev->mman.discovery_bin)
373 		return -ENOMEM;
374 
375 	/* Read from file if it is the preferred option */
376 	if (amdgpu_discovery == 2) {
377 		dev_info(adev->dev, "use ip discovery information from file\n");
378 		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
379 
380 		if (r) {
381 			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
382 			r = -EINVAL;
383 			goto out;
384 		}
385 
386 	} else {
387 		r = amdgpu_discovery_read_binary_from_mem(
388 			adev, adev->mman.discovery_bin);
389 		if (r)
390 			goto out;
391 	}
392 
393 	/* check the ip discovery binary signature */
394 	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
395 		dev_err(adev->dev,
396 			"get invalid ip discovery binary signature\n");
397 		r = -EINVAL;
398 		goto out;
399 	}
400 
401 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
402 
403 	offset = offsetof(struct binary_header, binary_checksum) +
404 		sizeof(bhdr->binary_checksum);
405 	size = le16_to_cpu(bhdr->binary_size) - offset;
406 	checksum = le16_to_cpu(bhdr->binary_checksum);
407 
408 	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
409 					      size, checksum)) {
410 		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
411 		r = -EINVAL;
412 		goto out;
413 	}
414 
415 	info = &bhdr->table_list[IP_DISCOVERY];
416 	offset = le16_to_cpu(info->offset);
417 	checksum = le16_to_cpu(info->checksum);
418 
419 	if (offset) {
420 		struct ip_discovery_header *ihdr =
421 			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
422 		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
423 			dev_err(adev->dev, "invalid ip discovery data table signature\n");
424 			r = -EINVAL;
425 			goto out;
426 		}
427 
428 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
429 						      le16_to_cpu(ihdr->size), checksum)) {
430 			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
431 			r = -EINVAL;
432 			goto out;
433 		}
434 	}
435 
436 	info = &bhdr->table_list[GC];
437 	offset = le16_to_cpu(info->offset);
438 	checksum = le16_to_cpu(info->checksum);
439 
440 	if (offset) {
441 		struct gpu_info_header *ghdr =
442 			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);
443 
444 		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
445 			dev_err(adev->dev, "invalid ip discovery gc table id\n");
446 			r = -EINVAL;
447 			goto out;
448 		}
449 
450 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
451 						      le32_to_cpu(ghdr->size), checksum)) {
452 			dev_err(adev->dev, "invalid gc data table checksum\n");
453 			r = -EINVAL;
454 			goto out;
455 		}
456 	}
457 
458 	info = &bhdr->table_list[HARVEST_INFO];
459 	offset = le16_to_cpu(info->offset);
460 	checksum = le16_to_cpu(info->checksum);
461 
462 	if (offset) {
463 		struct harvest_info_header *hhdr =
464 			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);
465 
466 		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
467 			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
468 			r = -EINVAL;
469 			goto out;
470 		}
471 
472 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
473 						      sizeof(struct harvest_table), checksum)) {
474 			dev_err(adev->dev, "invalid harvest data table checksum\n");
475 			r = -EINVAL;
476 			goto out;
477 		}
478 	}
479 
480 	info = &bhdr->table_list[VCN_INFO];
481 	offset = le16_to_cpu(info->offset);
482 	checksum = le16_to_cpu(info->checksum);
483 
484 	if (offset) {
485 		struct vcn_info_header *vhdr =
486 			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);
487 
488 		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
489 			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
490 			r = -EINVAL;
491 			goto out;
492 		}
493 
494 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
495 						      le32_to_cpu(vhdr->size_bytes), checksum)) {
496 			dev_err(adev->dev, "invalid vcn data table checksum\n");
497 			r = -EINVAL;
498 			goto out;
499 		}
500 	}
501 
502 	info = &bhdr->table_list[MALL_INFO];
503 	offset = le16_to_cpu(info->offset);
504 	checksum = le16_to_cpu(info->checksum);
505 
506 	if (offset) {
507 		struct mall_info_header *mhdr =
508 			(struct mall_info_header *)(adev->mman.discovery_bin + offset);
509 
510 		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
511 			dev_err(adev->dev, "invalid ip discovery mall table id\n");
512 			r = -EINVAL;
513 			goto out;
514 		}
515 
516 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
517 						      le32_to_cpu(mhdr->size_bytes), checksum)) {
518 			dev_err(adev->dev, "invalid mall data table checksum\n");
519 			r = -EINVAL;
520 			goto out;
521 		}
522 	}
523 
524 	return 0;
525 
526 out:
527 	kfree(adev->mman.discovery_bin);
528 	adev->mman.discovery_bin = NULL;
529 	if ((amdgpu_discovery != 2) &&
530 	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
531 		amdgpu_ras_query_boot_status(adev, 4);
532 	return r;
533 }
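/*
 * Overall layout of the blob as validated above.  Each table_list[]
 * entry holds a byte offset (relative to the start of the binary) and
 * a checksum for the table it points at:
 *
 *	struct binary_header
 *	  table_list[IP_DISCOVERY] --> struct ip_discovery_header
 *	  table_list[GC]           --> struct gpu_info_header
 *	  table_list[HARVEST_INFO] --> struct harvest_info_header
 *	  table_list[VCN_INFO]     --> struct vcn_info_header
 *	  table_list[MALL_INFO]    --> struct mall_info_header
 */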
534 
535 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
536 
537 void amdgpu_discovery_fini(struct amdgpu_device *adev)
538 {
539 	amdgpu_discovery_sysfs_fini(adev);
540 	kfree(adev->mman.discovery_bin);
541 	adev->mman.discovery_bin = NULL;
542 }
543 
544 static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
545 {
546 	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
547 		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
548 			  ip->instance_number);
549 		return -EINVAL;
550 	}
551 	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
552 		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
553 			  le16_to_cpu(ip->hw_id));
554 		return -EINVAL;
555 	}
556 
557 	return 0;
558 }
559 
560 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
561 						uint32_t *vcn_harvest_count)
562 {
563 	struct binary_header *bhdr;
564 	struct ip_discovery_header *ihdr;
565 	struct die_header *dhdr;
566 	struct ip_v4 *ip;
567 	uint16_t die_offset, ip_offset, num_dies, num_ips;
568 	int i, j;
569 
570 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
571 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
572 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
573 	num_dies = le16_to_cpu(ihdr->num_dies);
574 
575 	/* scan harvest bit of all IP data structures */
576 	for (i = 0; i < num_dies; i++) {
577 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
578 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
579 		num_ips = le16_to_cpu(dhdr->num_ips);
580 		ip_offset = die_offset + sizeof(*dhdr);
581 
582 		for (j = 0; j < num_ips; j++) {
583 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
584 
585 			if (amdgpu_discovery_validate_ip(ip))
586 				goto next_ip;
587 
588 			if (le16_to_cpu(ip->variant) == 1) {
589 				switch (le16_to_cpu(ip->hw_id)) {
590 				case VCN_HWID:
591 					(*vcn_harvest_count)++;
592 					if (ip->instance_number == 0) {
593 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
594 						adev->vcn.inst_mask &=
595 							~AMDGPU_VCN_HARVEST_VCN0;
596 						adev->jpeg.inst_mask &=
597 							~AMDGPU_VCN_HARVEST_VCN0;
598 					} else {
599 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
600 						adev->vcn.inst_mask &=
601 							~AMDGPU_VCN_HARVEST_VCN1;
602 						adev->jpeg.inst_mask &=
603 							~AMDGPU_VCN_HARVEST_VCN1;
604 					}
605 					break;
606 				case DMU_HWID:
607 					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
608 					break;
609 				default:
610 					break;
611 				}
612 			}
613 next_ip:
614 			if (ihdr->base_addr_64_bit)
615 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
616 			else
617 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
618 		}
619 	}
620 }
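/*
 * The ip_offset advance above reflects that ip_v4 records are variable
 * length: a fixed header followed by num_base_address register bases
 * (u32 each, or u64 when the header advertises 64-bit bases).
 * Conceptually, for the 32-bit case:
 *
 *	ip_offset += sizeof(struct ip_v4) +
 *		     ip->num_base_address * sizeof(u32);
 *
 * struct_size() computes the same thing with overflow checking.
 */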
621 
622 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
623 						     uint32_t *vcn_harvest_count,
624 						     uint32_t *umc_harvest_count)
625 {
626 	struct binary_header *bhdr;
627 	struct harvest_table *harvest_info;
628 	u16 offset;
629 	int i;
630 	uint32_t umc_harvest_config = 0;
631 
632 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
633 	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
634 
635 	if (!offset) {
636 		dev_err(adev->dev, "invalid harvest table offset\n");
637 		return;
638 	}
639 
640 	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
641 
642 	for (i = 0; i < 32; i++) {
643 		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
644 			break;
645 
646 		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
647 		case VCN_HWID:
648 			(*vcn_harvest_count)++;
649 			adev->vcn.harvest_config |=
650 				(1 << harvest_info->list[i].number_instance);
651 			adev->jpeg.harvest_config |=
652 				(1 << harvest_info->list[i].number_instance);
653 
654 			adev->vcn.inst_mask &=
655 				~(1U << harvest_info->list[i].number_instance);
656 			adev->jpeg.inst_mask &=
657 				~(1U << harvest_info->list[i].number_instance);
658 			break;
659 		case DMU_HWID:
660 			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
661 			break;
662 		case UMC_HWID:
663 			umc_harvest_config |=
664 				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
665 			(*umc_harvest_count)++;
666 			break;
667 		case GC_HWID:
668 			adev->gfx.xcc_mask &=
669 				~(1U << harvest_info->list[i].number_instance);
670 			break;
671 		case SDMA0_HWID:
672 			adev->sdma.sdma_mask &=
673 				~(1U << harvest_info->list[i].number_instance);
674 			break;
675 		default:
676 			break;
677 		}
678 	}
679 
680 	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
681 				~umc_harvest_config;
682 }
683 
684 /* ================================================== */
685 
686 struct ip_hw_instance {
687 	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
688 
689 	int hw_id;
690 	u8  num_instance;
691 	u8  major, minor, revision;
692 	u8  harvest;
693 
694 	int num_base_addresses;
695 	u32 base_addr[] __counted_by(num_base_addresses);
696 };
697 
698 struct ip_hw_id {
699 	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
700 	int hw_id;
701 };
702 
703 struct ip_die_entry {
704 	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
705 	u16 num_ips;
706 };
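/*
 * Together the three containers above build a sysfs tree of the form
 * (die 0, GC instance 0 shown as an example; "GC" is the
 * human-readable symlink created next to the numeric hw_id directory):
 *
 *	ip_discovery/die/0/GC/0/hw_id
 *	                       /num_instance
 *	                       /major, /minor, /revision
 *	                       /harvest
 *	                       /num_base_addresses, /base_addr
 */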
707 
708 /* -------------------------------------------------- */
709 
710 struct ip_hw_instance_attr {
711 	struct attribute attr;
712 	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
713 };
714 
715 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
716 {
717 	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
718 }
719 
720 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
721 {
722 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
723 }
724 
725 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
726 {
727 	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
728 }
729 
730 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
731 {
732 	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
733 }
734 
735 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
736 {
737 	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
738 }
739 
740 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
741 {
742 	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
743 }
744 
745 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
746 {
747 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
748 }
749 
750 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
751 {
752 	ssize_t res, at;
753 	int ii;
754 
755 	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
756 		/* Ensure at + size <= PAGE_SIZE before emitting the next line.
757 		 */
758 		if (at + 12 > PAGE_SIZE)
759 			break;
760 		res = sysfs_emit_at(buf, at, "0x%08X\n",
761 				    ip_hw_instance->base_addr[ii]);
762 		if (res <= 0)
763 			break;
764 		at += res;
765 	}
766 
767 	return res < 0 ? res : at;
768 }
769 
770 static struct ip_hw_instance_attr ip_hw_attr[] = {
771 	__ATTR_RO(hw_id),
772 	__ATTR_RO(num_instance),
773 	__ATTR_RO(major),
774 	__ATTR_RO(minor),
775 	__ATTR_RO(revision),
776 	__ATTR_RO(harvest),
777 	__ATTR_RO(num_base_addresses),
778 	__ATTR_RO(base_addr),
779 };
780 
781 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
782 ATTRIBUTE_GROUPS(ip_hw_instance);
783 
784 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
785 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
786 
787 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
788 					struct attribute *attr,
789 					char *buf)
790 {
791 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
792 	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
793 
794 	if (!ip_hw_attr->show)
795 		return -EIO;
796 
797 	return ip_hw_attr->show(ip_hw_instance, buf);
798 }
799 
800 static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
801 	.show = ip_hw_instance_attr_show,
802 };
803 
804 static void ip_hw_instance_release(struct kobject *kobj)
805 {
806 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
807 
808 	kfree(ip_hw_instance);
809 }
810 
811 static const struct kobj_type ip_hw_instance_ktype = {
812 	.release = ip_hw_instance_release,
813 	.sysfs_ops = &ip_hw_instance_sysfs_ops,
814 	.default_groups = ip_hw_instance_groups,
815 };
816 
817 /* -------------------------------------------------- */
818 
819 #define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
820 
821 static void ip_hw_id_release(struct kobject *kobj)
822 {
823 	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
824 
825 	if (!list_empty(&ip_hw_id->hw_id_kset.list))
826 		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
827 	kfree(ip_hw_id);
828 }
829 
830 static const struct kobj_type ip_hw_id_ktype = {
831 	.release = ip_hw_id_release,
832 	.sysfs_ops = &kobj_sysfs_ops,
833 };
834 
835 /* -------------------------------------------------- */
836 
837 static void die_kobj_release(struct kobject *kobj);
838 static void ip_disc_release(struct kobject *kobj);
839 
840 struct ip_die_entry_attribute {
841 	struct attribute attr;
842 	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
843 };
844 
845 #define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)
846 
847 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
848 {
849 	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
850 }
851 
852 /* If there are more ip_die_entry attrs, other than the number of IPs,
853  * we can make this into an array of attrs, and then initialize
854  * ip_die_entry_attrs in a loop.
855  */
856 static struct ip_die_entry_attribute num_ips_attr =
857 	__ATTR_RO(num_ips);
858 
859 static struct attribute *ip_die_entry_attrs[] = {
860 	&num_ips_attr.attr,
861 	NULL,
862 };
863 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
864 
865 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
866 
867 static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
868 				      struct attribute *attr,
869 				      char *buf)
870 {
871 	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
872 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
873 
874 	if (!ip_die_entry_attr->show)
875 		return -EIO;
876 
877 	return ip_die_entry_attr->show(ip_die_entry, buf);
878 }
879 
880 static void ip_die_entry_release(struct kobject *kobj)
881 {
882 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
883 
884 	if (!list_empty(&ip_die_entry->ip_kset.list))
885 		DRM_ERROR("ip_die_entry->ip_kset is not empty");
886 	kfree(ip_die_entry);
887 }
888 
889 static const struct sysfs_ops ip_die_entry_sysfs_ops = {
890 	.show = ip_die_entry_attr_show,
891 };
892 
893 static const struct kobj_type ip_die_entry_ktype = {
894 	.release = ip_die_entry_release,
895 	.sysfs_ops = &ip_die_entry_sysfs_ops,
896 	.default_groups = ip_die_entry_groups,
897 };
898 
899 static const struct kobj_type die_kobj_ktype = {
900 	.release = die_kobj_release,
901 	.sysfs_ops = &kobj_sysfs_ops,
902 };
903 
904 static const struct kobj_type ip_discovery_ktype = {
905 	.release = ip_disc_release,
906 	.sysfs_ops = &kobj_sysfs_ops,
907 };
908 
909 struct ip_discovery_top {
910 	struct kobject kobj;    /* ip_discovery/ */
911 	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
912 	struct amdgpu_device *adev;
913 };
914 
915 static void die_kobj_release(struct kobject *kobj)
916 {
917 	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
918 						       struct ip_discovery_top,
919 						       die_kset);
920 	if (!list_empty(&ip_top->die_kset.list))
921 		DRM_ERROR("ip_top->die_kset is not empty");
922 }
923 
924 static void ip_disc_release(struct kobject *kobj)
925 {
926 	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
927 						       kobj);
928 	struct amdgpu_device *adev = ip_top->adev;
929 
930 	adev->ip_top = NULL;
931 	kfree(ip_top);
932 }
933 
934 static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
935 						 uint16_t hw_id, uint8_t inst)
936 {
937 	uint8_t harvest = 0;
938 
939 	/* Until a uniform way is figured out, get the mask based on hwid */
940 	switch (hw_id) {
941 	case VCN_HWID:
942 		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
943 		break;
944 	case DMU_HWID:
945 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
946 			harvest = 0x1;
947 		break;
948 	case UMC_HWID:
949 		/* TODO: It needs another parsing; for now, ignore.*/
950 		/* TODO: UMC harvest needs separate parsing; for now, ignore. */
951 	case GC_HWID:
952 		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
953 		break;
954 	case SDMA0_HWID:
955 		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
956 		break;
957 	default:
958 		break;
959 	}
960 
961 	return harvest;
962 }
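/*
 * Example: with adev->vcn.inst_mask == 0x1 (hypothetical mask, only
 * VCN0 present), a call such as
 *
 *	amdgpu_discovery_get_harvest_info(adev, VCN_HWID, 1);
 *
 * returns 1: bit 1 is clear in the instance mask, so VCN instance 1
 * is reported as harvested.
 */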
963 
964 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
965 				      struct ip_die_entry *ip_die_entry,
966 				      const size_t _ip_offset, const int num_ips,
967 				      bool reg_base_64)
968 {
969 	int ii, jj, kk, res;
970 
971 	DRM_DEBUG("num_ips:%d", num_ips);
972 
973 	/* Find all IPs of a given HW ID, and add their instance to
974 	 * #die/#hw_id/#instance/<attributes>
975 	 */
976 	for (ii = 0; ii < HW_ID_MAX; ii++) {
977 		struct ip_hw_id *ip_hw_id = NULL;
978 		size_t ip_offset = _ip_offset;
979 
980 		for (jj = 0; jj < num_ips; jj++) {
981 			struct ip_v4 *ip;
982 			struct ip_hw_instance *ip_hw_instance;
983 
984 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
985 			if (amdgpu_discovery_validate_ip(ip) ||
986 			    le16_to_cpu(ip->hw_id) != ii)
987 				goto next_ip;
988 
989 			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
990 
991 			/* We have a hw_id match; register the hw
992 			 * block if not yet registered.
993 			 */
994 			if (!ip_hw_id) {
995 				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
996 				if (!ip_hw_id)
997 					return -ENOMEM;
998 				ip_hw_id->hw_id = ii;
999 
1000 				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
1001 				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
1002 				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
1003 				res = kset_register(&ip_hw_id->hw_id_kset);
1004 				if (res) {
1005 					DRM_ERROR("Couldn't register ip_hw_id kset");
1006 					kfree(ip_hw_id);
1007 					return res;
1008 				}
1009 				if (hw_id_names[ii]) {
1010 					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
1011 								&ip_hw_id->hw_id_kset.kobj,
1012 								hw_id_names[ii]);
1013 					if (res) {
1014 						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
1015 							  hw_id_names[ii],
1016 							  kobject_name(&ip_die_entry->ip_kset.kobj));
1017 					}
1018 				}
1019 			}
1020 
1021 			/* Now register its instance.
1022 			 */
1023 			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
1024 							     base_addr,
1025 							     ip->num_base_address),
1026 						 GFP_KERNEL);
1027 			if (!ip_hw_instance) {
1028 				DRM_ERROR("no memory for ip_hw_instance");
1029 				return -ENOMEM;
1030 			}
1031 			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1032 			ip_hw_instance->num_instance = ip->instance_number;
1033 			ip_hw_instance->major = ip->major;
1034 			ip_hw_instance->minor = ip->minor;
1035 			ip_hw_instance->revision = ip->revision;
1036 			ip_hw_instance->harvest =
1037 				amdgpu_discovery_get_harvest_info(
1038 					adev, ip_hw_instance->hw_id,
1039 					ip_hw_instance->num_instance);
1040 			ip_hw_instance->num_base_addresses = ip->num_base_address;
1041 
1042 			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1043 				if (reg_base_64)
1044 					ip_hw_instance->base_addr[kk] =
1045 						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1046 				else
1047 					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1048 			}
1049 
1050 			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1051 			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1052 			res = kobject_add(&ip_hw_instance->kobj, NULL,
1053 					  "%d", ip_hw_instance->num_instance);
1054 next_ip:
1055 			if (reg_base_64)
1056 				ip_offset += struct_size(ip, base_address_64,
1057 							 ip->num_base_address);
1058 			else
1059 				ip_offset += struct_size(ip, base_address,
1060 							 ip->num_base_address);
1061 		}
1062 	}
1063 
1064 	return 0;
1065 }
1066 
1067 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1068 {
1069 	struct binary_header *bhdr;
1070 	struct ip_discovery_header *ihdr;
1071 	struct die_header *dhdr;
1072 	struct kset *die_kset = &adev->ip_top->die_kset;
1073 	u16 num_dies, die_offset, num_ips;
1074 	size_t ip_offset;
1075 	int ii, res;
1076 
1077 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1078 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1079 					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1080 	num_dies = le16_to_cpu(ihdr->num_dies);
1081 
1082 	DRM_DEBUG("number of dies: %d\n", num_dies);
1083 
1084 	for (ii = 0; ii < num_dies; ii++) {
1085 		struct ip_die_entry *ip_die_entry;
1086 
1087 		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1088 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1089 		num_ips = le16_to_cpu(dhdr->num_ips);
1090 		ip_offset = die_offset + sizeof(*dhdr);
1091 
1092 		/* Add the die to the kset.
1093 		 *
1094 		 * dhdr->die_id == ii, which was checked in
1095 		 * amdgpu_discovery_reg_base_init().
1096 		 */
1097 
1098 		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1099 		if (!ip_die_entry)
1100 			return -ENOMEM;
1101 
1102 		ip_die_entry->num_ips = num_ips;
1103 
1104 		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1105 		ip_die_entry->ip_kset.kobj.kset = die_kset;
1106 		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1107 		res = kset_register(&ip_die_entry->ip_kset);
1108 		if (res) {
1109 			DRM_ERROR("Couldn't register ip_die_entry kset");
1110 			kfree(ip_die_entry);
1111 			return res;
1112 		}
1113 
1114 		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1115 	}
1116 
1117 	return 0;
1118 }
1119 
1120 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1121 {
1122 	struct kset *die_kset;
1123 	int res, ii;
1124 
1125 	if (!adev->mman.discovery_bin)
1126 		return -EINVAL;
1127 
1128 	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1129 	if (!adev->ip_top)
1130 		return -ENOMEM;
1131 
1132 	adev->ip_top->adev = adev;
1133 
1134 	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1135 				   &adev->dev->kobj, "ip_discovery");
1136 	if (res) {
1137 		DRM_ERROR("Couldn't init and add ip_discovery/");
1138 		goto Err;
1139 	}
1140 
1141 	die_kset = &adev->ip_top->die_kset;
1142 	kobject_set_name(&die_kset->kobj, "%s", "die");
1143 	die_kset->kobj.parent = &adev->ip_top->kobj;
1144 	die_kset->kobj.ktype = &die_kobj_ktype;
1145 	res = kset_register(&adev->ip_top->die_kset);
1146 	if (res) {
1147 		DRM_ERROR("Couldn't register die_kset");
1148 		goto Err;
1149 	}
1150 
1151 	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1152 		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1153 	ip_hw_instance_attrs[ii] = NULL;
1154 
1155 	res = amdgpu_discovery_sysfs_recurse(adev);
1156 
1157 	return res;
1158 Err:
1159 	kobject_put(&adev->ip_top->kobj);
1160 	return res;
1161 }
1162 
1163 /* -------------------------------------------------- */
1164 
1165 #define list_to_kobj(el) container_of(el, struct kobject, entry)
1166 
1167 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1168 {
1169 	struct list_head *el, *tmp;
1170 	struct kset *hw_id_kset;
1171 
1172 	hw_id_kset = &ip_hw_id->hw_id_kset;
1173 	spin_lock(&hw_id_kset->list_lock);
1174 	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1175 		list_del_init(el);
1176 		spin_unlock(&hw_id_kset->list_lock);
1177 		/* kobject is embedded in ip_hw_instance */
1178 		kobject_put(list_to_kobj(el));
1179 		spin_lock(&hw_id_kset->list_lock);
1180 	}
1181 	spin_unlock(&hw_id_kset->list_lock);
1182 	kobject_put(&ip_hw_id->hw_id_kset.kobj);
1183 }
1184 
1185 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1186 {
1187 	struct list_head *el, *tmp;
1188 	struct kset *ip_kset;
1189 
1190 	ip_kset = &ip_die_entry->ip_kset;
1191 	spin_lock(&ip_kset->list_lock);
1192 	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1193 		list_del_init(el);
1194 		spin_unlock(&ip_kset->list_lock);
1195 		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1196 		spin_lock(&ip_kset->list_lock);
1197 	}
1198 	spin_unlock(&ip_kset->list_lock);
1199 	kobject_put(&ip_die_entry->ip_kset.kobj);
1200 }
1201 
1202 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1203 {
1204 	struct list_head *el, *tmp;
1205 	struct kset *die_kset;
1206 
1207 	die_kset = &adev->ip_top->die_kset;
1208 	spin_lock(&die_kset->list_lock);
1209 	list_for_each_prev_safe(el, tmp, &die_kset->list) {
1210 		list_del_init(el);
1211 		spin_unlock(&die_kset->list_lock);
1212 		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1213 		spin_lock(&die_kset->list_lock);
1214 	}
1215 	spin_unlock(&die_kset->list_lock);
1216 	kobject_put(&adev->ip_top->die_kset.kobj);
1217 	kobject_put(&adev->ip_top->kobj);
1218 }
1219 
1220 /* ================================================== */
1221 
1222 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1223 {
1224 	uint8_t num_base_address, subrev, variant;
1225 	struct binary_header *bhdr;
1226 	struct ip_discovery_header *ihdr;
1227 	struct die_header *dhdr;
1228 	struct ip_v4 *ip;
1229 	uint16_t die_offset;
1230 	uint16_t ip_offset;
1231 	uint16_t num_dies;
1232 	uint16_t num_ips;
1233 	int hw_ip;
1234 	int i, j, k;
1235 	int r;
1236 
1237 	r = amdgpu_discovery_init(adev);
1238 	if (r) {
1239 		DRM_ERROR("amdgpu_discovery_init failed\n");
1240 		return r;
1241 	}
1242 
1243 	adev->gfx.xcc_mask = 0;
1244 	adev->sdma.sdma_mask = 0;
1245 	adev->vcn.inst_mask = 0;
1246 	adev->jpeg.inst_mask = 0;
1247 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1248 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1249 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1250 	num_dies = le16_to_cpu(ihdr->num_dies);
1251 
1252 	DRM_DEBUG("number of dies: %d\n", num_dies);
1253 
1254 	for (i = 0; i < num_dies; i++) {
1255 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1256 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1257 		num_ips = le16_to_cpu(dhdr->num_ips);
1258 		ip_offset = die_offset + sizeof(*dhdr);
1259 
1260 		if (le16_to_cpu(dhdr->die_id) != i) {
1261 			DRM_ERROR("invalid die id %d, expected %d\n",
1262 					le16_to_cpu(dhdr->die_id), i);
1263 			return -EINVAL;
1264 		}
1265 
1266 		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1267 				le16_to_cpu(dhdr->die_id), num_ips);
1268 
1269 		for (j = 0; j < num_ips; j++) {
1270 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1271 
1272 			if (amdgpu_discovery_validate_ip(ip))
1273 				goto next_ip;
1274 
1275 			num_base_address = ip->num_base_address;
1276 
1277 			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1278 				  hw_id_names[le16_to_cpu(ip->hw_id)],
1279 				  le16_to_cpu(ip->hw_id),
1280 				  ip->instance_number,
1281 				  ip->major, ip->minor,
1282 				  ip->revision);
1283 
1284 			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1285 				/* Bit [5:0]: original revision value
1286 				 * Bit [7:6]: en/decode capability:
1287 				 *     0b00 : VCN function normally
1288 				 *     0b10 : encode is disabled
1289 				 *     0b01 : decode is disabled
1290 				 */
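				/* e.g. a raw revision of 0x82 (hypothetical)
				 * means original revision 2 with encode
				 * disabled; the 0xc0 bits are saved into
				 * vcn_config and cleared from ip->revision
				 * below.
				 */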
1291 				if (adev->vcn.num_vcn_inst <
1292 				    AMDGPU_MAX_VCN_INSTANCES) {
1293 					adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1294 						ip->revision & 0xc0;
1295 					adev->vcn.num_vcn_inst++;
1296 					adev->vcn.inst_mask |=
1297 						(1U << ip->instance_number);
1298 					adev->jpeg.inst_mask |=
1299 						(1U << ip->instance_number);
1300 				} else {
1301 					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1302 						adev->vcn.num_vcn_inst + 1,
1303 						AMDGPU_MAX_VCN_INSTANCES);
1304 				}
1305 				ip->revision &= ~0xc0;
1306 			}
1307 			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1308 			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1309 			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1310 			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1311 				if (adev->sdma.num_instances <
1312 				    AMDGPU_MAX_SDMA_INSTANCES) {
1313 					adev->sdma.num_instances++;
1314 					adev->sdma.sdma_mask |=
1315 						(1U << ip->instance_number);
1316 				} else {
1317 					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1318 						adev->sdma.num_instances + 1,
1319 						AMDGPU_MAX_SDMA_INSTANCES);
1320 				}
1321 			}
1322 
1323 			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
1324 				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
1325 					adev->vpe.num_instances++;
1326 				else
1327 					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
1328 						adev->vpe.num_instances + 1,
1329 						AMDGPU_MAX_VPE_INSTANCES);
1330 			}
1331 
1332 			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1333 				adev->gmc.num_umc++;
1334 				adev->umc.node_inst_num++;
1335 			}
1336 
1337 			if (le16_to_cpu(ip->hw_id) == GC_HWID)
1338 				adev->gfx.xcc_mask |=
1339 					(1U << ip->instance_number);
1340 
1341 			for (k = 0; k < num_base_address; k++) {
1342 				/*
1343 				 * convert the endianness of base addresses in place,
1344 				 * so that we don't need to convert them when accessing adev->reg_offset.
1345 				 */
1346 				if (ihdr->base_addr_64_bit)
1347 					/* Truncate the 64bit base address from ip discovery
1348 					 * and only store lower 32bit ip base in reg_offset[].
1349 					 * Bits > 32 follows ASIC specific format, thus just
1350 					 * discard them and handle it within specific ASIC.
1351 					 * By this way reg_offset[] and related helpers can
1352 					 * stay unchanged.
1353 					 * The base address is in dwords, thus clear the
1354 					 * highest 2 bits to store.
1355 					 */
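					/* Worked example with a hypothetical
					 * value: a 64-bit base of 0x140012345
					 * yields lower_32_bits() = 0x40012345,
					 * then & 0x3FFFFFFF = 0x00012345.
					 */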
1356 					ip->base_address[k] =
1357 						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1358 				else
1359 					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1360 				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1361 			}
1362 
1363 			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1364 				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1365 				    hw_id_map[hw_ip] != 0) {
1366 					DRM_DEBUG("set register base offset for %s\n",
1367 							hw_id_names[le16_to_cpu(ip->hw_id)]);
1368 					adev->reg_offset[hw_ip][ip->instance_number] =
1369 						ip->base_address;
1370 					/* Instance support is somewhat inconsistent.
1371 					 * SDMA is a good example.  Sienna cichlid has 4 total
1372 					 * SDMA instances, each enumerated separately (HWIDs
1373 					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
1374 					 * but they are enumerated as multiple instances of the
1375 					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
1376 					 * example.  On most chips there are multiple instances
1377 					 * with the same HWID.
1378 					 */
1379 
1380 					if (ihdr->version < 3) {
1381 						subrev = 0;
1382 						variant = 0;
1383 					} else {
1384 						subrev = ip->sub_revision;
1385 						variant = ip->variant;
1386 					}
1387 
1388 					adev->ip_versions[hw_ip]
1389 							 [ip->instance_number] =
1390 						IP_VERSION_FULL(ip->major,
1391 								ip->minor,
1392 								ip->revision,
1393 								variant,
1394 								subrev);
1395 				}
1396 			}
1397 
1398 next_ip:
1399 			if (ihdr->base_addr_64_bit)
1400 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1401 			else
1402 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
1403 		}
1404 	}
1405 
1406 	return 0;
1407 }
1408 
1409 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1410 {
1411 	int vcn_harvest_count = 0;
1412 	int umc_harvest_count = 0;
1413 
1414 	/*
1415 	 * Harvest table does not fit Navi1x and legacy GPUs,
1416 	 * so read harvest bit per IP data structure to set
1417 	 * harvest configuration.
1418 	 */
1419 	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1420 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) {
1421 		if ((adev->pdev->device == 0x731E &&
1422 			(adev->pdev->revision == 0xC6 ||
1423 			 adev->pdev->revision == 0xC7)) ||
1424 			(adev->pdev->device == 0x7340 &&
1425 			 adev->pdev->revision == 0xC9) ||
1426 			(adev->pdev->device == 0x7360 &&
1427 			 adev->pdev->revision == 0xC7))
1428 			amdgpu_discovery_read_harvest_bit_per_ip(adev,
1429 				&vcn_harvest_count);
1430 	} else {
1431 		amdgpu_discovery_read_from_harvest_table(adev,
1432 							 &vcn_harvest_count,
1433 							 &umc_harvest_count);
1434 	}
1435 
1436 	amdgpu_discovery_harvest_config_quirk(adev);
1437 
1438 	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1439 		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1440 		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1441 	}
1442 
1443 	if (umc_harvest_count < adev->gmc.num_umc) {
1444 		adev->gmc.num_umc -= umc_harvest_count;
1445 	}
1446 }
1447 
1448 union gc_info {
1449 	struct gc_info_v1_0 v1;
1450 	struct gc_info_v1_1 v1_1;
1451 	struct gc_info_v1_2 v1_2;
1452 	struct gc_info_v2_0 v2;
1453 	struct gc_info_v2_1 v2_1;
1454 };
1455 
1456 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1457 {
1458 	struct binary_header *bhdr;
1459 	union gc_info *gc_info;
1460 	u16 offset;
1461 
1462 	if (!adev->mman.discovery_bin) {
1463 		DRM_ERROR("ip discovery uninitialized\n");
1464 		return -EINVAL;
1465 	}
1466 
1467 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1468 	offset = le16_to_cpu(bhdr->table_list[GC].offset);
1469 
1470 	if (!offset)
1471 		return 0;
1472 
1473 	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1474 
1475 	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1476 	case 1:
1477 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1478 		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1479 						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
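		/* A gfx10+ WGP packs two CUs, hence the factor of 2: e.g.
		 * 4 WGP0 + 1 WGP1 per SA gives 10 CUs per SH.
		 */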
1480 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1481 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1482 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1483 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1484 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1485 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1486 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1487 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1488 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1489 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1490 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1491 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1492 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1493 			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1494 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1495 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1496 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1497 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1498 			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1499 		}
1500 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1501 			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1502 			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1503 			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1504 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1505 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1506 			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1507 			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1508 			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1509 		}
1510 		break;
1511 	case 2:
1512 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1513 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1514 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1515 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1516 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1517 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1518 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1519 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1520 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1521 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1522 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1523 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1524 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1525 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1526 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1527 			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1528 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1529 		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1530 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1531 			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1532 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1533 			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1534 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1535 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1536 			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1537 		}
1538 		break;
1539 	default:
1540 		dev_err(adev->dev,
1541 			"Unhandled GC info table %d.%d\n",
1542 			le16_to_cpu(gc_info->v1.header.version_major),
1543 			le16_to_cpu(gc_info->v1.header.version_minor));
1544 		return -EINVAL;
1545 	}
1546 	return 0;
1547 }
1548 
1549 union mall_info {
1550 	struct mall_info_v1_0 v1;
1551 	struct mall_info_v2_0 v2;
1552 };
1553 
1554 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1555 {
1556 	struct binary_header *bhdr;
1557 	union mall_info *mall_info;
1558 	u32 u, mall_size_per_umc, m_s_present, half_use;
1559 	u64 mall_size;
1560 	u16 offset;
1561 
1562 	if (!adev->mman.discovery_bin) {
1563 		DRM_ERROR("ip discovery uninitialized\n");
1564 		return -EINVAL;
1565 	}
1566 
1567 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1568 	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1569 
1570 	if (!offset)
1571 		return 0;
1572 
1573 	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1574 
1575 	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1576 	case 1:
1577 		mall_size = 0;
1578 		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1579 		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1580 		half_use = le32_to_cpu(mall_info->v1.m_half_use);
1581 		for (u = 0; u < adev->gmc.num_umc; u++) {
1582 			if (m_s_present & (1 << u))
1583 				mall_size += mall_size_per_umc * 2;
1584 			else if (half_use & (1 << u))
1585 				mall_size += mall_size_per_umc / 2;
1586 			else
1587 				mall_size += mall_size_per_umc;
1588 		}
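		/* Worked example with hypothetical values: 4 UMCs at
		 * mall_size_per_umc = 2 MB, with m_s_present set for UMC0
		 * (counts double: 4 MB) and m_half_use set for UMC1
		 * (counts half: 1 MB), total 4 + 1 + 2 + 2 = 9 MB.
		 */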
1589 		adev->gmc.mall_size = mall_size;
1590 		adev->gmc.m_half_use = half_use;
1591 		break;
1592 	case 2:
1593 		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1594 		adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
1595 		break;
1596 	default:
1597 		dev_err(adev->dev,
1598 			"Unhandled MALL info table %d.%d\n",
1599 			le16_to_cpu(mall_info->v1.header.version_major),
1600 			le16_to_cpu(mall_info->v1.header.version_minor));
1601 		return -EINVAL;
1602 	}
1603 	return 0;
1604 }
1605 
1606 union vcn_info {
1607 	struct vcn_info_v1_0 v1;
1608 };
1609 
1610 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1611 {
1612 	struct binary_header *bhdr;
1613 	union vcn_info *vcn_info;
1614 	u16 offset;
1615 	int v;
1616 
1617 	if (!adev->mman.discovery_bin) {
1618 		DRM_ERROR("ip discovery uninitialized\n");
1619 		return -EINVAL;
1620 	}
1621 
1622 	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1623 	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES,
1624 	 * but that may change with new GPUs, so keep this check
1625 	 * for defensive purposes.
1626 	 */
1627 	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1628 		dev_err(adev->dev, "invalid vcn instances\n");
1629 		return -EINVAL;
1630 	}
1631 
1632 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1633 	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1634 
1635 	if (!offset)
1636 		return 0;
1637 
1638 	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1639 
1640 	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1641 	case 1:
1642 		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1643 		 * so this won't overflow.
1644 		 */
1645 		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1646 			adev->vcn.vcn_codec_disable_mask[v] =
1647 				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1648 		}
1649 		break;
1650 	default:
1651 		dev_err(adev->dev,
1652 			"Unhandled VCN info table %d.%d\n",
1653 			le16_to_cpu(vcn_info->v1.header.version_major),
1654 			le16_to_cpu(vcn_info->v1.header.version_minor));
1655 		return -EINVAL;
1656 	}
1657 	return 0;
1658 }
1659 
1660 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1661 {
1662 	/* Key off the GC IP version to pick the common (SOC) IP block. */
1663 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1664 	case IP_VERSION(9, 0, 1):
1665 	case IP_VERSION(9, 1, 0):
1666 	case IP_VERSION(9, 2, 1):
1667 	case IP_VERSION(9, 2, 2):
1668 	case IP_VERSION(9, 3, 0):
1669 	case IP_VERSION(9, 4, 0):
1670 	case IP_VERSION(9, 4, 1):
1671 	case IP_VERSION(9, 4, 2):
1672 	case IP_VERSION(9, 4, 3):
1673 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1674 		break;
1675 	case IP_VERSION(10, 1, 10):
1676 	case IP_VERSION(10, 1, 1):
1677 	case IP_VERSION(10, 1, 2):
1678 	case IP_VERSION(10, 1, 3):
1679 	case IP_VERSION(10, 1, 4):
1680 	case IP_VERSION(10, 3, 0):
1681 	case IP_VERSION(10, 3, 1):
1682 	case IP_VERSION(10, 3, 2):
1683 	case IP_VERSION(10, 3, 3):
1684 	case IP_VERSION(10, 3, 4):
1685 	case IP_VERSION(10, 3, 5):
1686 	case IP_VERSION(10, 3, 6):
1687 	case IP_VERSION(10, 3, 7):
1688 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1689 		break;
1690 	case IP_VERSION(11, 0, 0):
1691 	case IP_VERSION(11, 0, 1):
1692 	case IP_VERSION(11, 0, 2):
1693 	case IP_VERSION(11, 0, 3):
1694 	case IP_VERSION(11, 0, 4):
1695 	case IP_VERSION(11, 5, 0):
1696 	case IP_VERSION(11, 5, 1):
1697 		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1698 		break;
1699 	default:
1700 		dev_err(adev->dev,
1701 			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1702 			amdgpu_ip_version(adev, GC_HWIP, 0));
1703 		return -EINVAL;
1704 	}
1705 	return 0;
1706 }
1707 
1708 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1709 {
1710 	/* the GMC block is selected from the GC (or equivalently MMHUB) IP version */
1711 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1712 	case IP_VERSION(9, 0, 1):
1713 	case IP_VERSION(9, 1, 0):
1714 	case IP_VERSION(9, 2, 1):
1715 	case IP_VERSION(9, 2, 2):
1716 	case IP_VERSION(9, 3, 0):
1717 	case IP_VERSION(9, 4, 0):
1718 	case IP_VERSION(9, 4, 1):
1719 	case IP_VERSION(9, 4, 2):
1720 	case IP_VERSION(9, 4, 3):
1721 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1722 		break;
1723 	case IP_VERSION(10, 1, 10):
1724 	case IP_VERSION(10, 1, 1):
1725 	case IP_VERSION(10, 1, 2):
1726 	case IP_VERSION(10, 1, 3):
1727 	case IP_VERSION(10, 1, 4):
1728 	case IP_VERSION(10, 3, 0):
1729 	case IP_VERSION(10, 3, 1):
1730 	case IP_VERSION(10, 3, 2):
1731 	case IP_VERSION(10, 3, 3):
1732 	case IP_VERSION(10, 3, 4):
1733 	case IP_VERSION(10, 3, 5):
1734 	case IP_VERSION(10, 3, 6):
1735 	case IP_VERSION(10, 3, 7):
1736 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1737 		break;
1738 	case IP_VERSION(11, 0, 0):
1739 	case IP_VERSION(11, 0, 1):
1740 	case IP_VERSION(11, 0, 2):
1741 	case IP_VERSION(11, 0, 3):
1742 	case IP_VERSION(11, 0, 4):
1743 	case IP_VERSION(11, 5, 0):
1744 	case IP_VERSION(11, 5, 1):
1745 		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1746 		break;
1747 	default:
1748 		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1749 			amdgpu_ip_version(adev, GC_HWIP, 0));
1750 		return -EINVAL;
1751 	}
1752 	return 0;
1753 }
1754 
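/* Register the interrupt handler (IH) block matching the OSSSYS IP version. */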
1755 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1756 {
1757 	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
1758 	case IP_VERSION(4, 0, 0):
1759 	case IP_VERSION(4, 0, 1):
1760 	case IP_VERSION(4, 1, 0):
1761 	case IP_VERSION(4, 1, 1):
1762 	case IP_VERSION(4, 3, 0):
1763 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1764 		break;
1765 	case IP_VERSION(4, 2, 0):
1766 	case IP_VERSION(4, 2, 1):
1767 	case IP_VERSION(4, 4, 0):
1768 	case IP_VERSION(4, 4, 2):
1769 		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1770 		break;
1771 	case IP_VERSION(5, 0, 0):
1772 	case IP_VERSION(5, 0, 1):
1773 	case IP_VERSION(5, 0, 2):
1774 	case IP_VERSION(5, 0, 3):
1775 	case IP_VERSION(5, 2, 0):
1776 	case IP_VERSION(5, 2, 1):
1777 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1778 		break;
1779 	case IP_VERSION(6, 0, 0):
1780 	case IP_VERSION(6, 0, 1):
1781 	case IP_VERSION(6, 0, 2):
1782 		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1783 		break;
1784 	case IP_VERSION(6, 1, 0):
1785 		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1786 		break;
1787 	case IP_VERSION(7, 0, 0):
1788 		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
1789 		break;
1790 	default:
1791 		dev_err(adev->dev,
1792 			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1793 			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
1794 		return -EINVAL;
1795 	}
1796 	return 0;
1797 }
1798 
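/* Register the PSP (Platform Security Processor) block from the MP0 IP version. */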
1799 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1800 {
1801 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1802 	case IP_VERSION(9, 0, 0):
1803 		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1804 		break;
1805 	case IP_VERSION(10, 0, 0):
1806 	case IP_VERSION(10, 0, 1):
1807 		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1808 		break;
1809 	case IP_VERSION(11, 0, 0):
1810 	case IP_VERSION(11, 0, 2):
1811 	case IP_VERSION(11, 0, 4):
1812 	case IP_VERSION(11, 0, 5):
1813 	case IP_VERSION(11, 0, 9):
1814 	case IP_VERSION(11, 0, 7):
1815 	case IP_VERSION(11, 0, 11):
1816 	case IP_VERSION(11, 0, 12):
1817 	case IP_VERSION(11, 0, 13):
1818 	case IP_VERSION(11, 5, 0):
1819 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1820 		break;
1821 	case IP_VERSION(11, 0, 8):
1822 		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
1823 		break;
1824 	case IP_VERSION(11, 0, 3):
1825 	case IP_VERSION(12, 0, 1):
1826 		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
1827 		break;
1828 	case IP_VERSION(13, 0, 0):
1829 	case IP_VERSION(13, 0, 1):
1830 	case IP_VERSION(13, 0, 2):
1831 	case IP_VERSION(13, 0, 3):
1832 	case IP_VERSION(13, 0, 5):
1833 	case IP_VERSION(13, 0, 6):
1834 	case IP_VERSION(13, 0, 7):
1835 	case IP_VERSION(13, 0, 8):
1836 	case IP_VERSION(13, 0, 10):
1837 	case IP_VERSION(13, 0, 11):
1838 	case IP_VERSION(14, 0, 0):
1839 	case IP_VERSION(14, 0, 1):
1840 		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
1841 		break;
1842 	case IP_VERSION(13, 0, 4):
1843 		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
1844 		break;
1845 	case IP_VERSION(14, 0, 2):
1846 	case IP_VERSION(14, 0, 3):
1847 		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
1848 		break;
1849 	default:
1850 		dev_err(adev->dev,
1851 			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
1852 			amdgpu_ip_version(adev, MP0_HWIP, 0));
1853 		return -EINVAL;
1854 	}
1855 	return 0;
1856 }
1857 
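/*
 * Register the power-management block from the MP1 (SMU) IP version.  Older
 * parts go through the legacy powerplay layer; Arcturus is the exception
 * among the MP1 v11.0.2 users and takes the swSMU path instead.
 */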
1858 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
1859 {
1860 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1861 	case IP_VERSION(9, 0, 0):
1862 	case IP_VERSION(10, 0, 0):
1863 	case IP_VERSION(10, 0, 1):
1864 	case IP_VERSION(11, 0, 2):
1865 		if (adev->asic_type == CHIP_ARCTURUS)
1866 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1867 		else
1868 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1869 		break;
1870 	case IP_VERSION(11, 0, 0):
1871 	case IP_VERSION(11, 0, 5):
1872 	case IP_VERSION(11, 0, 9):
1873 	case IP_VERSION(11, 0, 7):
1874 	case IP_VERSION(11, 0, 8):
1875 	case IP_VERSION(11, 0, 11):
1876 	case IP_VERSION(11, 0, 12):
1877 	case IP_VERSION(11, 0, 13):
1878 	case IP_VERSION(11, 5, 0):
1879 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1880 		break;
1881 	case IP_VERSION(12, 0, 0):
1882 	case IP_VERSION(12, 0, 1):
1883 		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
1884 		break;
1885 	case IP_VERSION(13, 0, 0):
1886 	case IP_VERSION(13, 0, 1):
1887 	case IP_VERSION(13, 0, 2):
1888 	case IP_VERSION(13, 0, 3):
1889 	case IP_VERSION(13, 0, 4):
1890 	case IP_VERSION(13, 0, 5):
1891 	case IP_VERSION(13, 0, 6):
1892 	case IP_VERSION(13, 0, 7):
1893 	case IP_VERSION(13, 0, 8):
1894 	case IP_VERSION(13, 0, 10):
1895 	case IP_VERSION(13, 0, 11):
1896 		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
1897 		break;
1898 	case IP_VERSION(14, 0, 0):
1899 		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
1900 		break;
1901 	default:
1902 		dev_err(adev->dev,
1903 			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
1904 			amdgpu_ip_version(adev, MP1_HWIP, 0));
1905 		return -EINVAL;
1906 	}
1907 	return 0;
1908 }
1909 
1910 #if defined(CONFIG_DRM_AMD_DC)
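/* With SR-IOV, route display through a virtual KMS device instead of DC. */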
1911 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
1912 {
1913 	amdgpu_device_set_sriov_virtual_display(adev);
1914 	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
1915 }
1916 #endif
1917 
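/*
 * Register the display block: VKMS when virtual display is requested,
 * otherwise DC keyed off DCE_HWIP (DCN parts) or DCI_HWIP (the DCE-12
 * based vega parts).
 */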
1918 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
1919 {
1920 	if (adev->enable_virtual_display) {
1921 		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
1922 		return 0;
1923 	}
1924 
1925 	if (!amdgpu_device_has_dc_support(adev))
1926 		return 0;
1927 
1928 #if defined(CONFIG_DRM_AMD_DC)
1929 	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1930 		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1931 		case IP_VERSION(1, 0, 0):
1932 		case IP_VERSION(1, 0, 1):
1933 		case IP_VERSION(2, 0, 2):
1934 		case IP_VERSION(2, 0, 0):
1935 		case IP_VERSION(2, 0, 3):
1936 		case IP_VERSION(2, 1, 0):
1937 		case IP_VERSION(3, 0, 0):
1938 		case IP_VERSION(3, 0, 2):
1939 		case IP_VERSION(3, 0, 3):
1940 		case IP_VERSION(3, 0, 1):
1941 		case IP_VERSION(3, 1, 2):
1942 		case IP_VERSION(3, 1, 3):
1943 		case IP_VERSION(3, 1, 4):
1944 		case IP_VERSION(3, 1, 5):
1945 		case IP_VERSION(3, 1, 6):
1946 		case IP_VERSION(3, 2, 0):
1947 		case IP_VERSION(3, 2, 1):
1948 		case IP_VERSION(3, 5, 0):
1949 		case IP_VERSION(3, 5, 1):
1950 			if (amdgpu_sriov_vf(adev))
1951 				amdgpu_discovery_set_sriov_display(adev);
1952 			else
1953 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
1954 			break;
1955 		default:
1956 			dev_err(adev->dev,
1957 				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
1958 				amdgpu_ip_version(adev, DCE_HWIP, 0));
1959 			return -EINVAL;
1960 		}
1961 	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
1962 		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
1963 		case IP_VERSION(12, 0, 0):
1964 		case IP_VERSION(12, 0, 1):
1965 		case IP_VERSION(12, 1, 0):
1966 			if (amdgpu_sriov_vf(adev))
1967 				amdgpu_discovery_set_sriov_display(adev);
1968 			else
1969 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
1970 			break;
1971 		default:
1972 			dev_err(adev->dev,
1973 				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
1974 				amdgpu_ip_version(adev, DCI_HWIP, 0));
1975 			return -EINVAL;
1976 		}
1977 	}
1978 #endif
1979 	return 0;
1980 }
1981 
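/* Register the graphics (GFX) block matching the GC IP version. */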
1982 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
1983 {
1984 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1985 	case IP_VERSION(9, 0, 1):
1986 	case IP_VERSION(9, 1, 0):
1987 	case IP_VERSION(9, 2, 1):
1988 	case IP_VERSION(9, 2, 2):
1989 	case IP_VERSION(9, 3, 0):
1990 	case IP_VERSION(9, 4, 0):
1991 	case IP_VERSION(9, 4, 1):
1992 	case IP_VERSION(9, 4, 2):
1993 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
1994 		break;
1995 	case IP_VERSION(9, 4, 3):
1996 		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
1997 		break;
1998 	case IP_VERSION(10, 1, 10):
1999 	case IP_VERSION(10, 1, 2):
2000 	case IP_VERSION(10, 1, 1):
2001 	case IP_VERSION(10, 1, 3):
2002 	case IP_VERSION(10, 1, 4):
2003 	case IP_VERSION(10, 3, 0):
2004 	case IP_VERSION(10, 3, 2):
2005 	case IP_VERSION(10, 3, 1):
2006 	case IP_VERSION(10, 3, 4):
2007 	case IP_VERSION(10, 3, 5):
2008 	case IP_VERSION(10, 3, 6):
2009 	case IP_VERSION(10, 3, 3):
2010 	case IP_VERSION(10, 3, 7):
2011 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
2012 		break;
2013 	case IP_VERSION(11, 0, 0):
2014 	case IP_VERSION(11, 0, 1):
2015 	case IP_VERSION(11, 0, 2):
2016 	case IP_VERSION(11, 0, 3):
2017 	case IP_VERSION(11, 0, 4):
2018 	case IP_VERSION(11, 5, 0):
2019 	case IP_VERSION(11, 5, 1):
2020 		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
2021 		break;
2022 	default:
2023 		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
2024 			amdgpu_ip_version(adev, GC_HWIP, 0));
2025 		return -EINVAL;
2026 	}
2027 	return 0;
2028 }
2029 
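/* Register the SDMA engine block matching the SDMA0 IP version. */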
2030 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
2031 {
2032 	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
2033 	case IP_VERSION(4, 0, 0):
2034 	case IP_VERSION(4, 0, 1):
2035 	case IP_VERSION(4, 1, 0):
2036 	case IP_VERSION(4, 1, 1):
2037 	case IP_VERSION(4, 1, 2):
2038 	case IP_VERSION(4, 2, 0):
2039 	case IP_VERSION(4, 2, 2):
2040 	case IP_VERSION(4, 4, 0):
2041 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
2042 		break;
2043 	case IP_VERSION(4, 4, 2):
2044 		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
2045 		break;
2046 	case IP_VERSION(5, 0, 0):
2047 	case IP_VERSION(5, 0, 1):
2048 	case IP_VERSION(5, 0, 2):
2049 	case IP_VERSION(5, 0, 5):
2050 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2051 		break;
2052 	case IP_VERSION(5, 2, 0):
2053 	case IP_VERSION(5, 2, 2):
2054 	case IP_VERSION(5, 2, 4):
2055 	case IP_VERSION(5, 2, 5):
2056 	case IP_VERSION(5, 2, 6):
2057 	case IP_VERSION(5, 2, 3):
2058 	case IP_VERSION(5, 2, 1):
2059 	case IP_VERSION(5, 2, 7):
2060 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2061 		break;
2062 	case IP_VERSION(6, 0, 0):
2063 	case IP_VERSION(6, 0, 1):
2064 	case IP_VERSION(6, 0, 2):
2065 	case IP_VERSION(6, 0, 3):
2066 	case IP_VERSION(6, 1, 0):
2067 	case IP_VERSION(6, 1, 1):
2068 		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2069 		break;
2070 	default:
2071 		dev_err(adev->dev,
2072 			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2073 			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2074 		return -EINVAL;
2075 	}
2076 	return 0;
2077 }
2078 
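/*
 * Register the multimedia blocks: legacy UVD/VCE when a VCE IP is present
 * (vega-era parts), otherwise VCN and, where present, JPEG.
 */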
2079 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2080 {
2081 	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2082 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2083 		case IP_VERSION(7, 0, 0):
2084 		case IP_VERSION(7, 2, 0):
2085 			/* UVD is not supported on vega20 SR-IOV */
2086 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2087 				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2088 			break;
2089 		default:
2090 			dev_err(adev->dev,
2091 				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2092 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2093 			return -EINVAL;
2094 		}
2095 		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2096 		case IP_VERSION(4, 0, 0):
2097 		case IP_VERSION(4, 1, 0):
2098 			/* VCE is not supported on vega20 SR-IOV */
2099 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2100 				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2101 			break;
2102 		default:
2103 			dev_err(adev->dev,
2104 				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2105 				amdgpu_ip_version(adev, VCE_HWIP, 0));
2106 			return -EINVAL;
2107 		}
2108 	} else {
2109 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2110 		case IP_VERSION(1, 0, 0):
2111 		case IP_VERSION(1, 0, 1):
2112 			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2113 			break;
2114 		case IP_VERSION(2, 0, 0):
2115 		case IP_VERSION(2, 0, 2):
2116 		case IP_VERSION(2, 2, 0):
2117 			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2118 			if (!amdgpu_sriov_vf(adev))
2119 				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2120 			break;
2121 		case IP_VERSION(2, 0, 3):
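			/* no VCN/JPEG blocks are registered for v2.0.3 */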
2122 			break;
2123 		case IP_VERSION(2, 5, 0):
2124 			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2125 			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2126 			break;
2127 		case IP_VERSION(2, 6, 0):
2128 			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2129 			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2130 			break;
2131 		case IP_VERSION(3, 0, 0):
2132 		case IP_VERSION(3, 0, 16):
2133 		case IP_VERSION(3, 1, 1):
2134 		case IP_VERSION(3, 1, 2):
2135 		case IP_VERSION(3, 0, 2):
2136 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2137 			if (!amdgpu_sriov_vf(adev))
2138 				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2139 			break;
2140 		case IP_VERSION(3, 0, 33):
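			/* VCN only; this variant registers no JPEG block */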
2141 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2142 			break;
2143 		case IP_VERSION(4, 0, 0):
2144 		case IP_VERSION(4, 0, 2):
2145 		case IP_VERSION(4, 0, 4):
2146 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2147 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2148 			break;
2149 		case IP_VERSION(4, 0, 3):
2150 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2151 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2152 			break;
2153 		case IP_VERSION(4, 0, 5):
2154 		case IP_VERSION(4, 0, 6):
2155 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2156 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2157 			break;
2158 		case IP_VERSION(5, 0, 0):
2159 			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2160 			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
2161 			break;
2162 		default:
2163 			dev_err(adev->dev,
2164 				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2165 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2166 			return -EINVAL;
2167 		}
2168 	}
2169 	return 0;
2170 }
2171 
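/*
 * Register the MES (micro engine scheduler) block: opt-in via the
 * amdgpu_mes module parameter on gfx10 parts, always enabled from
 * gfx11 onwards.
 */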
2172 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2173 {
2174 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2175 	case IP_VERSION(10, 1, 10):
2176 	case IP_VERSION(10, 1, 1):
2177 	case IP_VERSION(10, 1, 2):
2178 	case IP_VERSION(10, 1, 3):
2179 	case IP_VERSION(10, 1, 4):
2180 	case IP_VERSION(10, 3, 0):
2181 	case IP_VERSION(10, 3, 1):
2182 	case IP_VERSION(10, 3, 2):
2183 	case IP_VERSION(10, 3, 3):
2184 	case IP_VERSION(10, 3, 4):
2185 	case IP_VERSION(10, 3, 5):
2186 	case IP_VERSION(10, 3, 6):
2187 		if (amdgpu_mes) {
2188 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
2189 			adev->enable_mes = true;
2190 			if (amdgpu_mes_kiq)
2191 				adev->enable_mes_kiq = true;
2192 		}
2193 		break;
2194 	case IP_VERSION(11, 0, 0):
2195 	case IP_VERSION(11, 0, 1):
2196 	case IP_VERSION(11, 0, 2):
2197 	case IP_VERSION(11, 0, 3):
2198 	case IP_VERSION(11, 0, 4):
2199 	case IP_VERSION(11, 5, 0):
2200 	case IP_VERSION(11, 5, 1):
2201 		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2202 		adev->enable_mes = true;
2203 		adev->enable_mes_kiq = true;
2204 		break;
2205 	default:
2206 		break;
2207 	}
2208 	return 0;
2209 }
2210 
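/* SOC-specific configuration hook; currently only aqua vanjaram (GC 9.4.3). */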
2211 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2212 {
2213 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2214 	case IP_VERSION(9, 4, 3):
2215 		aqua_vanjaram_init_soc_config(adev);
2216 		break;
2217 	default:
2218 		break;
2219 	}
2220 }
2221 
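/* Register the VPE (video processing engine) block where the IP is present. */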
2222 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2223 {
2224 	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2225 	case IP_VERSION(6, 1, 0):
2226 	case IP_VERSION(6, 1, 1):
2227 		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2228 		break;
2229 	default:
2230 		break;
2231 	}
2232 
2233 	return 0;
2234 }
2235 
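/*
 * Register the user-mode multimedia scheduler (UMSCH) block when enabled
 * via bit 0 of the amdgpu_umsch_mm module parameter.
 */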
2236 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2237 {
2238 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2239 	case IP_VERSION(4, 0, 5):
2240 		if (amdgpu_umsch_mm & 0x1) {
2241 			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2242 			adev->enable_umsch_mm = true;
2243 		}
2244 		break;
2245 	default:
2246 		break;
2247 	}
2248 
2249 	return 0;
2250 }
2251 
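/*
 * Main entry point for IP configuration: resolve IP versions (hardcoded
 * tables for the pre-discovery vega/raven era, the discovery binary for
 * everything else), wire up per-IP callbacks, then register the IP blocks
 * in bring-up order.
 */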
2252 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2253 {
2254 	int r;
2255 
2256 	switch (adev->asic_type) {
2257 	case CHIP_VEGA10:
2258 		vega10_reg_base_init(adev);
2259 		adev->sdma.num_instances = 2;
2260 		adev->gmc.num_umc = 4;
2261 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2262 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2263 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2264 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2265 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2266 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2267 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2268 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2269 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2270 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2271 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2272 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2273 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2274 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2275 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2276 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2277 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2278 		break;
2279 	case CHIP_VEGA12:
2280 		vega10_reg_base_init(adev);
2281 		adev->sdma.num_instances = 2;
2282 		adev->gmc.num_umc = 4;
2283 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2284 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2285 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2286 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2287 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2288 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2289 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2290 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2291 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2292 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2293 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2294 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2295 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2296 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2297 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2298 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2299 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2300 		break;
2301 	case CHIP_RAVEN:
2302 		vega10_reg_base_init(adev);
2303 		adev->sdma.num_instances = 1;
2304 		adev->vcn.num_vcn_inst = 1;
2305 		adev->gmc.num_umc = 2;
2306 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2307 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2308 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2309 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2310 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2311 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2312 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2313 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2314 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2315 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2316 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2317 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2318 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2319 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2320 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2321 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2322 		} else {
2323 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2324 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2325 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2326 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2327 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2328 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2329 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2330 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2331 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2332 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2333 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2334 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2335 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2336 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2337 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2338 		}
2339 		break;
2340 	case CHIP_VEGA20:
2341 		vega20_reg_base_init(adev);
2342 		adev->sdma.num_instances = 2;
2343 		adev->gmc.num_umc = 8;
2344 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2345 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2346 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2347 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2348 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2349 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2350 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2351 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2352 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2353 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2354 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2355 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2356 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2357 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2358 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2359 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2360 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2361 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2362 		break;
2363 	case CHIP_ARCTURUS:
2364 		arct_reg_base_init(adev);
2365 		adev->sdma.num_instances = 8;
2366 		adev->vcn.num_vcn_inst = 2;
2367 		adev->gmc.num_umc = 8;
2368 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2369 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2370 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2371 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2372 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2373 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2374 		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2375 		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2376 		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2377 		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2378 		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2379 		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2380 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2381 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2382 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2383 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2384 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2385 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2386 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2387 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2388 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2389 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2390 		break;
2391 	case CHIP_ALDEBARAN:
2392 		aldebaran_reg_base_init(adev);
2393 		adev->sdma.num_instances = 5;
2394 		adev->vcn.num_vcn_inst = 2;
2395 		adev->gmc.num_umc = 4;
2396 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2397 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2398 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2399 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2400 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2401 		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2402 		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2403 		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2404 		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2405 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2406 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2407 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2408 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2409 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2410 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2411 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2412 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2413 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2414 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2415 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2416 		break;
2417 	default:
2418 		r = amdgpu_discovery_reg_base_init(adev);
2419 		if (r)
2420 			return r;
2421 
2422 		amdgpu_discovery_harvest_ip(adev);
2423 		amdgpu_discovery_get_gfx_info(adev);
2424 		amdgpu_discovery_get_mall_info(adev);
2425 		amdgpu_discovery_get_vcn_info(adev);
2426 		break;
2427 	}
2428 
2429 	amdgpu_discovery_init_soc_config(adev);
2430 	amdgpu_discovery_sysfs_init(adev);
2431 
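	/* map the GC IP version onto a chip family */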
2432 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2433 	case IP_VERSION(9, 0, 1):
2434 	case IP_VERSION(9, 2, 1):
2435 	case IP_VERSION(9, 4, 0):
2436 	case IP_VERSION(9, 4, 1):
2437 	case IP_VERSION(9, 4, 2):
2438 	case IP_VERSION(9, 4, 3):
2439 		adev->family = AMDGPU_FAMILY_AI;
2440 		break;
2441 	case IP_VERSION(9, 1, 0):
2442 	case IP_VERSION(9, 2, 2):
2443 	case IP_VERSION(9, 3, 0):
2444 		adev->family = AMDGPU_FAMILY_RV;
2445 		break;
2446 	case IP_VERSION(10, 1, 10):
2447 	case IP_VERSION(10, 1, 1):
2448 	case IP_VERSION(10, 1, 2):
2449 	case IP_VERSION(10, 1, 3):
2450 	case IP_VERSION(10, 1, 4):
2451 	case IP_VERSION(10, 3, 0):
2452 	case IP_VERSION(10, 3, 2):
2453 	case IP_VERSION(10, 3, 4):
2454 	case IP_VERSION(10, 3, 5):
2455 		adev->family = AMDGPU_FAMILY_NV;
2456 		break;
2457 	case IP_VERSION(10, 3, 1):
2458 		adev->family = AMDGPU_FAMILY_VGH;
2459 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2460 		break;
2461 	case IP_VERSION(10, 3, 3):
2462 		adev->family = AMDGPU_FAMILY_YC;
2463 		break;
2464 	case IP_VERSION(10, 3, 6):
2465 		adev->family = AMDGPU_FAMILY_GC_10_3_6;
2466 		break;
2467 	case IP_VERSION(10, 3, 7):
2468 		adev->family = AMDGPU_FAMILY_GC_10_3_7;
2469 		break;
2470 	case IP_VERSION(11, 0, 0):
2471 	case IP_VERSION(11, 0, 2):
2472 	case IP_VERSION(11, 0, 3):
2473 		adev->family = AMDGPU_FAMILY_GC_11_0_0;
2474 		break;
2475 	case IP_VERSION(11, 0, 1):
2476 	case IP_VERSION(11, 0, 4):
2477 		adev->family = AMDGPU_FAMILY_GC_11_0_1;
2478 		break;
2479 	case IP_VERSION(11, 5, 0):
2480 	case IP_VERSION(11, 5, 1):
2481 		adev->family = AMDGPU_FAMILY_GC_11_5_0;
2482 		break;
2483 	default:
2484 		return -EINVAL;
2485 	}
2486 
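	/* mark APUs based on the GC IP version */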
2487 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2488 	case IP_VERSION(9, 1, 0):
2489 	case IP_VERSION(9, 2, 2):
2490 	case IP_VERSION(9, 3, 0):
2491 	case IP_VERSION(10, 1, 3):
2492 	case IP_VERSION(10, 1, 4):
2493 	case IP_VERSION(10, 3, 1):
2494 	case IP_VERSION(10, 3, 3):
2495 	case IP_VERSION(10, 3, 6):
2496 	case IP_VERSION(10, 3, 7):
2497 	case IP_VERSION(11, 0, 1):
2498 	case IP_VERSION(11, 0, 4):
2499 	case IP_VERSION(11, 5, 0):
2500 	case IP_VERSION(11, 5, 1):
2501 		adev->flags |= AMD_IS_APU;
2502 		break;
2503 	default:
2504 		break;
2505 	}
2506 
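	/* XGMI (multi-GPU link) support and IP version fixup */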
2507 	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
2508 		adev->gmc.xgmi.supported = true;
2509 
2510 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
2511 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
2512 
2513 	/* set NBIO callbacks */
2514 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2515 	case IP_VERSION(6, 1, 0):
2516 	case IP_VERSION(6, 2, 0):
2517 		adev->nbio.funcs = &nbio_v6_1_funcs;
2518 		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2519 		break;
2520 	case IP_VERSION(7, 0, 0):
2521 	case IP_VERSION(7, 0, 1):
2522 	case IP_VERSION(2, 5, 0):
2523 		adev->nbio.funcs = &nbio_v7_0_funcs;
2524 		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2525 		break;
2526 	case IP_VERSION(7, 4, 0):
2527 	case IP_VERSION(7, 4, 1):
2528 	case IP_VERSION(7, 4, 4):
2529 		adev->nbio.funcs = &nbio_v7_4_funcs;
2530 		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2531 		break;
2532 	case IP_VERSION(7, 9, 0):
2533 		adev->nbio.funcs = &nbio_v7_9_funcs;
2534 		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2535 		break;
2536 	case IP_VERSION(7, 11, 0):
2537 	case IP_VERSION(7, 11, 1):
2538 		adev->nbio.funcs = &nbio_v7_11_funcs;
2539 		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2540 		break;
2541 	case IP_VERSION(7, 2, 0):
2542 	case IP_VERSION(7, 2, 1):
2543 	case IP_VERSION(7, 3, 0):
2544 	case IP_VERSION(7, 5, 0):
2545 	case IP_VERSION(7, 5, 1):
2546 		adev->nbio.funcs = &nbio_v7_2_funcs;
2547 		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2548 		break;
2549 	case IP_VERSION(2, 1, 1):
2550 	case IP_VERSION(2, 3, 0):
2551 	case IP_VERSION(2, 3, 1):
2552 	case IP_VERSION(2, 3, 2):
2553 	case IP_VERSION(3, 3, 0):
2554 	case IP_VERSION(3, 3, 1):
2555 	case IP_VERSION(3, 3, 2):
2556 	case IP_VERSION(3, 3, 3):
2557 		adev->nbio.funcs = &nbio_v2_3_funcs;
2558 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2559 		break;
2560 	case IP_VERSION(4, 3, 0):
2561 	case IP_VERSION(4, 3, 1):
2562 		if (amdgpu_sriov_vf(adev))
2563 			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2564 		else
2565 			adev->nbio.funcs = &nbio_v4_3_funcs;
2566 		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2567 		break;
2568 	case IP_VERSION(7, 7, 0):
2569 	case IP_VERSION(7, 7, 1):
2570 		adev->nbio.funcs = &nbio_v7_7_funcs;
2571 		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2572 		break;
2573 	case IP_VERSION(6, 3, 1):
2574 		adev->nbio.funcs = &nbif_v6_3_1_funcs;
2575 		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
2576 		break;
2577 	default:
2578 		break;
2579 	}
2580 
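	/* set HDP callbacks */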
2581 	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2582 	case IP_VERSION(4, 0, 0):
2583 	case IP_VERSION(4, 0, 1):
2584 	case IP_VERSION(4, 1, 0):
2585 	case IP_VERSION(4, 1, 1):
2586 	case IP_VERSION(4, 1, 2):
2587 	case IP_VERSION(4, 2, 0):
2588 	case IP_VERSION(4, 2, 1):
2589 	case IP_VERSION(4, 4, 0):
2590 	case IP_VERSION(4, 4, 2):
2591 		adev->hdp.funcs = &hdp_v4_0_funcs;
2592 		break;
2593 	case IP_VERSION(5, 0, 0):
2594 	case IP_VERSION(5, 0, 1):
2595 	case IP_VERSION(5, 0, 2):
2596 	case IP_VERSION(5, 0, 3):
2597 	case IP_VERSION(5, 0, 4):
2598 	case IP_VERSION(5, 2, 0):
2599 		adev->hdp.funcs = &hdp_v5_0_funcs;
2600 		break;
2601 	case IP_VERSION(5, 2, 1):
2602 		adev->hdp.funcs = &hdp_v5_2_funcs;
2603 		break;
2604 	case IP_VERSION(6, 0, 0):
2605 	case IP_VERSION(6, 0, 1):
2606 	case IP_VERSION(6, 1, 0):
2607 		adev->hdp.funcs = &hdp_v6_0_funcs;
2608 		break;
2609 	case IP_VERSION(7, 0, 0):
2610 		adev->hdp.funcs = &hdp_v7_0_funcs;
2611 		break;
2612 	default:
2613 		break;
2614 	}
2615 
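	/* set DF (data fabric) callbacks */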
2616 	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
2617 	case IP_VERSION(3, 6, 0):
2618 	case IP_VERSION(3, 6, 1):
2619 	case IP_VERSION(3, 6, 2):
2620 		adev->df.funcs = &df_v3_6_funcs;
2621 		break;
2622 	case IP_VERSION(2, 1, 0):
2623 	case IP_VERSION(2, 1, 1):
2624 	case IP_VERSION(2, 5, 0):
2625 	case IP_VERSION(3, 5, 1):
2626 	case IP_VERSION(3, 5, 2):
2627 		adev->df.funcs = &df_v1_7_funcs;
2628 		break;
2629 	case IP_VERSION(4, 3, 0):
2630 		adev->df.funcs = &df_v4_3_funcs;
2631 		break;
2632 	case IP_VERSION(4, 6, 2):
2633 		adev->df.funcs = &df_v4_6_2_funcs;
2634 		break;
2635 	default:
2636 		break;
2637 	}
2638 
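	/* set SMUIO callbacks */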
2639 	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
2640 	case IP_VERSION(9, 0, 0):
2641 	case IP_VERSION(9, 0, 1):
2642 	case IP_VERSION(10, 0, 0):
2643 	case IP_VERSION(10, 0, 1):
2644 	case IP_VERSION(10, 0, 2):
2645 		adev->smuio.funcs = &smuio_v9_0_funcs;
2646 		break;
2647 	case IP_VERSION(11, 0, 0):
2648 	case IP_VERSION(11, 0, 2):
2649 	case IP_VERSION(11, 0, 3):
2650 	case IP_VERSION(11, 0, 4):
2651 	case IP_VERSION(11, 0, 7):
2652 	case IP_VERSION(11, 0, 8):
2653 		adev->smuio.funcs = &smuio_v11_0_funcs;
2654 		break;
2655 	case IP_VERSION(11, 0, 6):
2656 	case IP_VERSION(11, 0, 10):
2657 	case IP_VERSION(11, 0, 11):
2658 	case IP_VERSION(11, 5, 0):
2659 	case IP_VERSION(13, 0, 1):
2660 	case IP_VERSION(13, 0, 9):
2661 	case IP_VERSION(13, 0, 10):
2662 		adev->smuio.funcs = &smuio_v11_0_6_funcs;
2663 		break;
2664 	case IP_VERSION(13, 0, 2):
2665 		adev->smuio.funcs = &smuio_v13_0_funcs;
2666 		break;
2667 	case IP_VERSION(13, 0, 3):
2668 		adev->smuio.funcs = &smuio_v13_0_3_funcs;
2669 		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
2670 			adev->flags |= AMD_IS_APU;
2672 		break;
2673 	case IP_VERSION(13, 0, 6):
2674 	case IP_VERSION(13, 0, 8):
2675 	case IP_VERSION(14, 0, 0):
2676 	case IP_VERSION(14, 0, 1):
2677 		adev->smuio.funcs = &smuio_v13_0_6_funcs;
2678 		break;
2679 	default:
2680 		break;
2681 	}
2682 
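	/* set LSDMA callbacks */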
2683 	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
2684 	case IP_VERSION(6, 0, 0):
2685 	case IP_VERSION(6, 0, 1):
2686 	case IP_VERSION(6, 0, 2):
2687 	case IP_VERSION(6, 0, 3):
2688 		adev->lsdma.funcs = &lsdma_v6_0_funcs;
2689 		break;
2690 	case IP_VERSION(7, 0, 0):
2691 	case IP_VERSION(7, 0, 1):
2692 		adev->lsdma.funcs = &lsdma_v7_0_funcs;
2693 		break;
2694 	default:
2695 		break;
2696 	}
2697 
2698 	r = amdgpu_discovery_set_common_ip_blocks(adev);
2699 	if (r)
2700 		return r;
2701 
2702 	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
2703 	if (r)
2704 		return r;
2705 
2706 	/* For SR-IOV, PSP needs to be initialized before IH */
2707 	if (amdgpu_sriov_vf(adev)) {
2708 		r = amdgpu_discovery_set_psp_ip_blocks(adev);
2709 		if (r)
2710 			return r;
2711 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2712 		if (r)
2713 			return r;
2714 	} else {
2715 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2716 		if (r)
2717 			return r;
2718 
2719 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2720 			r = amdgpu_discovery_set_psp_ip_blocks(adev);
2721 			if (r)
2722 				return r;
2723 		}
2724 	}
2725 
2726 	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2727 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2728 		if (r)
2729 			return r;
2730 	}
2731 
2732 	r = amdgpu_discovery_set_display_ip_blocks(adev);
2733 	if (r)
2734 		return r;
2735 
2736 	r = amdgpu_discovery_set_gc_ip_blocks(adev);
2737 	if (r)
2738 		return r;
2739 
2740 	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
2741 	if (r)
2742 		return r;
2743 
2744 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
2745 	     !amdgpu_sriov_vf(adev)) ||
2746 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
2747 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2748 		if (r)
2749 			return r;
2750 	}
2751 
2752 	r = amdgpu_discovery_set_mm_ip_blocks(adev);
2753 	if (r)
2754 		return r;
2755 
2756 	r = amdgpu_discovery_set_mes_ip_blocks(adev);
2757 	if (r)
2758 		return r;
2759 
2760 	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
2761 	if (r)
2762 		return r;
2763 
2764 	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
2765 	if (r)
2766 		return r;
2767 
2768 	return 0;
2769 }
2770 
2771