xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c (revision fd7d598270724cc787982ea48bbe17ad383a8b7f)
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30 
31 #include "soc15.h"
32 #include "gfx_v9_0.h"
33 #include "gfx_v9_4_3.h"
34 #include "gmc_v9_0.h"
35 #include "df_v1_7.h"
36 #include "df_v3_6.h"
37 #include "df_v4_3.h"
38 #include "df_v4_6_2.h"
39 #include "nbio_v6_1.h"
40 #include "nbio_v7_0.h"
41 #include "nbio_v7_4.h"
42 #include "nbio_v7_9.h"
43 #include "nbio_v7_11.h"
44 #include "hdp_v4_0.h"
45 #include "vega10_ih.h"
46 #include "vega20_ih.h"
47 #include "sdma_v4_0.h"
48 #include "sdma_v4_4_2.h"
49 #include "uvd_v7_0.h"
50 #include "vce_v4_0.h"
51 #include "vcn_v1_0.h"
52 #include "vcn_v2_5.h"
53 #include "jpeg_v2_5.h"
54 #include "smuio_v9_0.h"
55 #include "gmc_v10_0.h"
56 #include "gmc_v11_0.h"
57 #include "gfxhub_v2_0.h"
58 #include "mmhub_v2_0.h"
59 #include "nbio_v2_3.h"
60 #include "nbio_v4_3.h"
61 #include "nbio_v7_2.h"
62 #include "nbio_v7_7.h"
63 #include "hdp_v5_0.h"
64 #include "hdp_v5_2.h"
65 #include "hdp_v6_0.h"
66 #include "nv.h"
67 #include "soc21.h"
68 #include "navi10_ih.h"
69 #include "ih_v6_0.h"
70 #include "ih_v6_1.h"
71 #include "gfx_v10_0.h"
72 #include "gfx_v11_0.h"
73 #include "sdma_v5_0.h"
74 #include "sdma_v5_2.h"
75 #include "sdma_v6_0.h"
76 #include "lsdma_v6_0.h"
77 #include "vcn_v2_0.h"
78 #include "jpeg_v2_0.h"
79 #include "vcn_v3_0.h"
80 #include "jpeg_v3_0.h"
81 #include "vcn_v4_0.h"
82 #include "jpeg_v4_0.h"
83 #include "vcn_v4_0_3.h"
84 #include "jpeg_v4_0_3.h"
85 #include "vcn_v4_0_5.h"
86 #include "jpeg_v4_0_5.h"
87 #include "amdgpu_vkms.h"
88 #include "mes_v10_1.h"
89 #include "mes_v11_0.h"
90 #include "smuio_v11_0.h"
91 #include "smuio_v11_0_6.h"
92 #include "smuio_v13_0.h"
93 #include "smuio_v13_0_3.h"
94 #include "smuio_v13_0_6.h"
95 
96 #include "amdgpu_vpe.h"
97 
98 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
99 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
100 
101 #define mmRCC_CONFIG_MEMSIZE	0xde3
102 #define mmMM_INDEX		0x0
103 #define mmMM_INDEX_HI		0x6
104 #define mmMM_DATA		0x1
105 
106 static const char *hw_id_names[HW_ID_MAX] = {
107 	[MP1_HWID]		= "MP1",
108 	[MP2_HWID]		= "MP2",
109 	[THM_HWID]		= "THM",
110 	[SMUIO_HWID]		= "SMUIO",
111 	[FUSE_HWID]		= "FUSE",
112 	[CLKA_HWID]		= "CLKA",
113 	[PWR_HWID]		= "PWR",
114 	[GC_HWID]		= "GC",
115 	[UVD_HWID]		= "UVD",
116 	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
117 	[ACP_HWID]		= "ACP",
118 	[DCI_HWID]		= "DCI",
119 	[DMU_HWID]		= "DMU",
120 	[DCO_HWID]		= "DCO",
121 	[DIO_HWID]		= "DIO",
122 	[XDMA_HWID]		= "XDMA",
123 	[DCEAZ_HWID]		= "DCEAZ",
124 	[DAZ_HWID]		= "DAZ",
125 	[SDPMUX_HWID]		= "SDPMUX",
126 	[NTB_HWID]		= "NTB",
127 	[IOHC_HWID]		= "IOHC",
128 	[L2IMU_HWID]		= "L2IMU",
129 	[VCE_HWID]		= "VCE",
130 	[MMHUB_HWID]		= "MMHUB",
131 	[ATHUB_HWID]		= "ATHUB",
132 	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
133 	[DFX_HWID]		= "DFX",
134 	[DBGU0_HWID]		= "DBGU0",
135 	[DBGU1_HWID]		= "DBGU1",
136 	[OSSSYS_HWID]		= "OSSSYS",
137 	[HDP_HWID]		= "HDP",
138 	[SDMA0_HWID]		= "SDMA0",
139 	[SDMA1_HWID]		= "SDMA1",
140 	[SDMA2_HWID]		= "SDMA2",
141 	[SDMA3_HWID]		= "SDMA3",
142 	[LSDMA_HWID]		= "LSDMA",
143 	[ISP_HWID]		= "ISP",
144 	[DBGU_IO_HWID]		= "DBGU_IO",
145 	[DF_HWID]		= "DF",
146 	[CLKB_HWID]		= "CLKB",
147 	[FCH_HWID]		= "FCH",
148 	[DFX_DAP_HWID]		= "DFX_DAP",
149 	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
150 	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
151 	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
152 	[L1IMU3_HWID]		= "L1IMU3",
153 	[L1IMU4_HWID]		= "L1IMU4",
154 	[L1IMU5_HWID]		= "L1IMU5",
155 	[L1IMU6_HWID]		= "L1IMU6",
156 	[L1IMU7_HWID]		= "L1IMU7",
157 	[L1IMU8_HWID]		= "L1IMU8",
158 	[L1IMU9_HWID]		= "L1IMU9",
159 	[L1IMU10_HWID]		= "L1IMU10",
160 	[L1IMU11_HWID]		= "L1IMU11",
161 	[L1IMU12_HWID]		= "L1IMU12",
162 	[L1IMU13_HWID]		= "L1IMU13",
163 	[L1IMU14_HWID]		= "L1IMU14",
164 	[L1IMU15_HWID]		= "L1IMU15",
165 	[WAFLC_HWID]		= "WAFLC",
166 	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
167 	[PCIE_HWID]		= "PCIE",
168 	[PCS_HWID]		= "PCS",
169 	[DDCL_HWID]		= "DDCL",
170 	[SST_HWID]		= "SST",
171 	[IOAGR_HWID]		= "IOAGR",
172 	[NBIF_HWID]		= "NBIF",
173 	[IOAPIC_HWID]		= "IOAPIC",
174 	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
175 	[NTBCCP_HWID]		= "NTBCCP",
176 	[UMC_HWID]		= "UMC",
177 	[SATA_HWID]		= "SATA",
178 	[USB_HWID]		= "USB",
179 	[CCXSEC_HWID]		= "CCXSEC",
180 	[XGMI_HWID]		= "XGMI",
181 	[XGBE_HWID]		= "XGBE",
182 	[MP0_HWID]		= "MP0",
183 	[VPE_HWID]		= "VPE",
184 };
185 
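/* Map the driver's HWIP indices (used to index adev->reg_offset[] and
 * adev->ip_versions[]) to the HWIDs used by the discovery table.  HWIPs
 * left unmapped stay 0 and are skipped when register bases are assigned.
 */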
186 static int hw_id_map[MAX_HWIP] = {
187 	[GC_HWIP]	= GC_HWID,
188 	[HDP_HWIP]	= HDP_HWID,
189 	[SDMA0_HWIP]	= SDMA0_HWID,
190 	[SDMA1_HWIP]	= SDMA1_HWID,
191 	[SDMA2_HWIP]    = SDMA2_HWID,
192 	[SDMA3_HWIP]    = SDMA3_HWID,
193 	[LSDMA_HWIP]    = LSDMA_HWID,
194 	[MMHUB_HWIP]	= MMHUB_HWID,
195 	[ATHUB_HWIP]	= ATHUB_HWID,
196 	[NBIO_HWIP]	= NBIF_HWID,
197 	[MP0_HWIP]	= MP0_HWID,
198 	[MP1_HWIP]	= MP1_HWID,
199 	[UVD_HWIP]	= UVD_HWID,
200 	[VCE_HWIP]	= VCE_HWID,
201 	[DF_HWIP]	= DF_HWID,
202 	[DCE_HWIP]	= DMU_HWID,
203 	[OSSSYS_HWIP]	= OSSSYS_HWID,
204 	[SMUIO_HWIP]	= SMUIO_HWID,
205 	[PWR_HWIP]	= PWR_HWID,
206 	[NBIF_HWIP]	= NBIF_HWID,
207 	[THM_HWIP]	= THM_HWID,
208 	[CLK_HWIP]	= CLKA_HWID,
209 	[UMC_HWIP]	= UMC_HWID,
210 	[XGMI_HWIP]	= XGMI_HWID,
211 	[DCI_HWIP]	= DCI_HWID,
212 	[PCIE_HWIP]	= PCIE_HWID,
213 	[VPE_HWIP]	= VPE_HWID,
214 };
215 
216 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
217 {
218 	u64 tmr_offset, tmr_size, pos;
219 	void *discv_regn;
220 	int ret;
221 
222 	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
223 	if (ret)
224 		return ret;
225 
226 	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
227 
228 	/* This region is read-only and reserved from system use */
229 	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
230 	if (discv_regn) {
231 		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
232 		memunmap(discv_regn);
233 		return 0;
234 	}
235 
236 	return -ENOENT;
237 }
238 
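/* Read the discovery blob from the top of VRAM when RCC_CONFIG_MEMSIZE
 * reports a non-zero size; otherwise (e.g. on APUs, where the register
 * reads zero) fall back to the ACPI-described system memory TMR above.
 */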
239 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
240 						 uint8_t *binary)
241 {
242 	uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
243 	int ret = 0;
244 
245 	if (vram_size) {
246 		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
247 		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
248 					  adev->mman.discovery_tmr_size, false);
249 	} else {
250 		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
251 	}
252 
253 	return ret;
254 }
255 
256 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
257 {
258 	const struct firmware *fw;
259 	const char *fw_name;
260 	int r;
261 
262 	switch (amdgpu_discovery) {
263 	case 2:
264 		fw_name = FIRMWARE_IP_DISCOVERY;
265 		break;
266 	default:
267 		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
268 		return -EINVAL;
269 	}
270 
271 	r = request_firmware(&fw, fw_name, adev->dev);
272 	if (r) {
273 		dev_err(adev->dev, "can't load firmware \"%s\"\n",
274 			fw_name);
275 		return r;
276 	}
277 
278 	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
279 	release_firmware(fw);
280 
281 	return 0;
282 }
283 
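/* The discovery tables use a simple 16-bit byte-sum checksum: add up every
 * byte in the covered region and compare against the value stored in the
 * table header.  For example, the bytes { 0x01, 0x02, 0xFF } sum to 0x0102.
 */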
284 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
285 {
286 	uint16_t checksum = 0;
287 	int i;
288 
289 	for (i = 0; i < size; i++)
290 		checksum += data[i];
291 
292 	return checksum;
293 }
294 
295 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
296 						    uint16_t expected)
297 {
298 	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
299 }
300 
301 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
302 {
303 	struct binary_header *bhdr;
304 	bhdr = (struct binary_header *)binary;
305 
306 	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
307 }
308 
309 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
310 {
311 	/*
312 	 * So far, apply this quirk only to those Navy Flounder boards whose
313 	 * harvest table carries a bad VCN config.
314 	 */
315 	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
316 	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
317 		switch (adev->pdev->revision) {
318 		case 0xC1:
319 		case 0xC2:
320 		case 0xC3:
321 		case 0xC5:
322 		case 0xC7:
323 		case 0xCF:
324 		case 0xDF:
325 			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
326 			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
327 			break;
328 		default:
329 			break;
330 		}
331 	}
332 }
333 
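/* Fetch and validate the discovery binary: allocate a DISCOVERY_TMR_SIZE
 * buffer, read the blob (from amdgpu/ip_discovery.bin when amdgpu_discovery=2,
 * otherwise from VRAM/system memory), then verify the binary signature and
 * checksum followed by the per-table signatures/IDs and checksums (IP
 * discovery, GC, harvest and VCN info; the MALL table check is currently
 * compiled out below).
 */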
334 static int amdgpu_discovery_init(struct amdgpu_device *adev)
335 {
336 	struct table_info *info;
337 	struct binary_header *bhdr;
338 	uint16_t offset;
339 	uint16_t size;
340 	uint16_t checksum;
341 	int r;
342 
343 	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
344 	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
345 	if (!adev->mman.discovery_bin)
346 		return -ENOMEM;
347 
348 	/* Read from file if it is the preferred option */
349 	if (amdgpu_discovery == 2) {
350 		dev_info(adev->dev, "use ip discovery information from file");
351 		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
352 
353 		if (r) {
354 			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
355 			r = -EINVAL;
356 			goto out;
357 		}
358 
359 	} else {
360 		r = amdgpu_discovery_read_binary_from_mem(
361 			adev, adev->mman.discovery_bin);
362 		if (r)
363 			goto out;
364 	}
365 
366 	/* check the ip discovery binary signature */
367 	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
368 		dev_err(adev->dev,
369 			"invalid ip discovery binary signature\n");
370 		r = -EINVAL;
371 		goto out;
372 	}
373 
374 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
375 
376 	offset = offsetof(struct binary_header, binary_checksum) +
377 		sizeof(bhdr->binary_checksum);
378 	size = le16_to_cpu(bhdr->binary_size) - offset;
379 	checksum = le16_to_cpu(bhdr->binary_checksum);
380 
381 	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
382 					      size, checksum)) {
383 		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
384 		r = -EINVAL;
385 		goto out;
386 	}
387 
388 	info = &bhdr->table_list[IP_DISCOVERY];
389 	offset = le16_to_cpu(info->offset);
390 	checksum = le16_to_cpu(info->checksum);
391 
392 	if (offset) {
393 		struct ip_discovery_header *ihdr =
394 			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
395 		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
396 			dev_err(adev->dev, "invalid ip discovery data table signature\n");
397 			r = -EINVAL;
398 			goto out;
399 		}
400 
401 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
402 						      le16_to_cpu(ihdr->size), checksum)) {
403 			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
404 			r = -EINVAL;
405 			goto out;
406 		}
407 	}
408 
409 	info = &bhdr->table_list[GC];
410 	offset = le16_to_cpu(info->offset);
411 	checksum = le16_to_cpu(info->checksum);
412 
413 	if (offset) {
414 		struct gpu_info_header *ghdr =
415 			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);
416 
417 		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
418 			dev_err(adev->dev, "invalid ip discovery gc table id\n");
419 			r = -EINVAL;
420 			goto out;
421 		}
422 
423 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
424 						      le32_to_cpu(ghdr->size), checksum)) {
425 			dev_err(adev->dev, "invalid gc data table checksum\n");
426 			r = -EINVAL;
427 			goto out;
428 		}
429 	}
430 
431 	info = &bhdr->table_list[HARVEST_INFO];
432 	offset = le16_to_cpu(info->offset);
433 	checksum = le16_to_cpu(info->checksum);
434 
435 	if (offset) {
436 		struct harvest_info_header *hhdr =
437 			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);
438 
439 		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
440 			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
441 			r = -EINVAL;
442 			goto out;
443 		}
444 
445 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
446 						      sizeof(struct harvest_table), checksum)) {
447 			dev_err(adev->dev, "invalid harvest data table checksum\n");
448 			r = -EINVAL;
449 			goto out;
450 		}
451 	}
452 
453 	info = &bhdr->table_list[VCN_INFO];
454 	offset = le16_to_cpu(info->offset);
455 	checksum = le16_to_cpu(info->checksum);
456 
457 	if (offset) {
458 		struct vcn_info_header *vhdr =
459 			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);
460 
461 		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
462 			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
463 			r = -EINVAL;
464 			goto out;
465 		}
466 
467 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
468 						      le32_to_cpu(vhdr->size_bytes), checksum)) {
469 			dev_err(adev->dev, "invalid vcn data table checksum\n");
470 			r = -EINVAL;
471 			goto out;
472 		}
473 	}
474 
475 	info = &bhdr->table_list[MALL_INFO];
476 	offset = le16_to_cpu(info->offset);
477 	checksum = le16_to_cpu(info->checksum);
478 
479 	if (0 && offset) {
480 		struct mall_info_header *mhdr =
481 			(struct mall_info_header *)(adev->mman.discovery_bin + offset);
482 
483 		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
484 			dev_err(adev->dev, "invalid ip discovery mall table id\n");
485 			r = -EINVAL;
486 			goto out;
487 		}
488 
489 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
490 						      le32_to_cpu(mhdr->size_bytes), checksum)) {
491 			dev_err(adev->dev, "invalid mall data table checksum\n");
492 			r = -EINVAL;
493 			goto out;
494 		}
495 	}
496 
497 	return 0;
498 
499 out:
500 	kfree(adev->mman.discovery_bin);
501 	adev->mman.discovery_bin = NULL;
502 
503 	return r;
504 }
505 
506 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
507 
508 void amdgpu_discovery_fini(struct amdgpu_device *adev)
509 {
510 	amdgpu_discovery_sysfs_fini(adev);
511 	kfree(adev->mman.discovery_bin);
512 	adev->mman.discovery_bin = NULL;
513 }
514 
515 static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
516 {
517 	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
518 		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
519 			  ip->instance_number);
520 		return -EINVAL;
521 	}
522 	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
523 		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
524 			  le16_to_cpu(ip->hw_id));
525 		return -EINVAL;
526 	}
527 
528 	return 0;
529 }
530 
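/* Harvest parsing for parts whose discovery binary carries no usable harvest
 * table: walk every IP entry of every die and treat variant == 1 as "this
 * instance is harvested", currently only acted upon for VCN and DMU.
 */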
531 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
532 						uint32_t *vcn_harvest_count)
533 {
534 	struct binary_header *bhdr;
535 	struct ip_discovery_header *ihdr;
536 	struct die_header *dhdr;
537 	struct ip_v4 *ip;
538 	uint16_t die_offset, ip_offset, num_dies, num_ips;
539 	int i, j;
540 
541 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
542 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
543 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
544 	num_dies = le16_to_cpu(ihdr->num_dies);
545 
546 	/* scan the harvest bit of all IP data structures */
547 	for (i = 0; i < num_dies; i++) {
548 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
549 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
550 		num_ips = le16_to_cpu(dhdr->num_ips);
551 		ip_offset = die_offset + sizeof(*dhdr);
552 
553 		for (j = 0; j < num_ips; j++) {
554 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
555 
556 			if (amdgpu_discovery_validate_ip(ip))
557 				goto next_ip;
558 
559 			if (le16_to_cpu(ip->variant) == 1) {
560 				switch (le16_to_cpu(ip->hw_id)) {
561 				case VCN_HWID:
562 					(*vcn_harvest_count)++;
563 					if (ip->instance_number == 0) {
564 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
565 						adev->vcn.inst_mask &=
566 							~AMDGPU_VCN_HARVEST_VCN0;
567 						adev->jpeg.inst_mask &=
568 							~AMDGPU_VCN_HARVEST_VCN0;
569 					} else {
570 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
571 						adev->vcn.inst_mask &=
572 							~AMDGPU_VCN_HARVEST_VCN1;
573 						adev->jpeg.inst_mask &=
574 							~AMDGPU_VCN_HARVEST_VCN1;
575 					}
576 					break;
577 				case DMU_HWID:
578 					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
579 					break;
580 				default:
581 					break;
582 				}
583 			}
584 next_ip:
585 			if (ihdr->base_addr_64_bit)
586 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
587 			else
588 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
589 		}
590 	}
591 }
592 
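/* Preferred harvest parsing: the harvest table holds up to 32 {hw_id,
 * instance} entries terminated by a zero hw_id; each entry marks one
 * VCN/JPEG, DMU, UMC, GC or SDMA instance as harvested and clears it from
 * the corresponding instance/active mask.
 */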
593 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
594 						     uint32_t *vcn_harvest_count,
595 						     uint32_t *umc_harvest_count)
596 {
597 	struct binary_header *bhdr;
598 	struct harvest_table *harvest_info;
599 	u16 offset;
600 	int i;
601 	uint32_t umc_harvest_config = 0;
602 
603 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
604 	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
605 
606 	if (!offset) {
607 		dev_err(adev->dev, "invalid harvest table offset\n");
608 		return;
609 	}
610 
611 	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
612 
613 	for (i = 0; i < 32; i++) {
614 		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
615 			break;
616 
617 		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
618 		case VCN_HWID:
619 			(*vcn_harvest_count)++;
620 			adev->vcn.harvest_config |=
621 				(1 << harvest_info->list[i].number_instance);
622 			adev->jpeg.harvest_config |=
623 				(1 << harvest_info->list[i].number_instance);
624 
625 			adev->vcn.inst_mask &=
626 				~(1U << harvest_info->list[i].number_instance);
627 			adev->jpeg.inst_mask &=
628 				~(1U << harvest_info->list[i].number_instance);
629 			break;
630 		case DMU_HWID:
631 			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
632 			break;
633 		case UMC_HWID:
634 			umc_harvest_config |=
635 				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
636 			(*umc_harvest_count)++;
637 			break;
638 		case GC_HWID:
639 			adev->gfx.xcc_mask &=
640 				~(1U << harvest_info->list[i].number_instance);
641 			break;
642 		case SDMA0_HWID:
643 			adev->sdma.sdma_mask &=
644 				~(1U << harvest_info->list[i].number_instance);
645 			break;
646 		default:
647 			break;
648 		}
649 	}
650 
651 	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
652 				~umc_harvest_config;
653 }
654 
655 /* ================================================== */
656 
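/* The kobject hierarchy below exposes the discovery table under the device's
 * sysfs directory, roughly (with hypothetical die/hw_id/instance numbers):
 *
 *   ip_discovery/die/0/<hw_id>/0/{hw_id,num_instance,major,minor,revision,
 *                                 harvest,num_base_addresses,base_addr}
 *
 * plus a human-readable symlink (e.g. "GC") in each die directory pointing
 * at the numeric <hw_id> directory.
 */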
657 struct ip_hw_instance {
658 	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
659 
660 	int hw_id;
661 	u8  num_instance;
662 	u8  major, minor, revision;
663 	u8  harvest;
664 
665 	int num_base_addresses;
666 	u32 base_addr[] __counted_by(num_base_addresses);
667 };
668 
669 struct ip_hw_id {
670 	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
671 	int hw_id;
672 };
673 
674 struct ip_die_entry {
675 	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
676 	u16 num_ips;
677 };
678 
679 /* -------------------------------------------------- */
680 
681 struct ip_hw_instance_attr {
682 	struct attribute attr;
683 	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
684 };
685 
686 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
687 {
688 	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
689 }
690 
691 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
692 {
693 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
694 }
695 
696 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
697 {
698 	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
699 }
700 
701 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
702 {
703 	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
704 }
705 
706 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
707 {
708 	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
709 }
710 
711 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
712 {
713 	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
714 }
715 
716 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
717 {
718 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
719 }
720 
721 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
722 {
723 	ssize_t res, at;
724 	int ii;
725 
726 	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
727 		/* Here we satisfy the condition that at + size <= PAGE_SIZE.
728 		 */
729 		if (at + 12 > PAGE_SIZE)
730 			break;
731 		res = sysfs_emit_at(buf, at, "0x%08X\n",
732 				    ip_hw_instance->base_addr[ii]);
733 		if (res <= 0)
734 			break;
735 		at += res;
736 	}
737 
738 	return res < 0 ? res : at;
739 }
740 
741 static struct ip_hw_instance_attr ip_hw_attr[] = {
742 	__ATTR_RO(hw_id),
743 	__ATTR_RO(num_instance),
744 	__ATTR_RO(major),
745 	__ATTR_RO(minor),
746 	__ATTR_RO(revision),
747 	__ATTR_RO(harvest),
748 	__ATTR_RO(num_base_addresses),
749 	__ATTR_RO(base_addr),
750 };
751 
752 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
753 ATTRIBUTE_GROUPS(ip_hw_instance);
754 
755 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
756 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
757 
758 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
759 					struct attribute *attr,
760 					char *buf)
761 {
762 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
763 	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
764 
765 	if (!ip_hw_attr->show)
766 		return -EIO;
767 
768 	return ip_hw_attr->show(ip_hw_instance, buf);
769 }
770 
771 static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
772 	.show = ip_hw_instance_attr_show,
773 };
774 
775 static void ip_hw_instance_release(struct kobject *kobj)
776 {
777 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
778 
779 	kfree(ip_hw_instance);
780 }
781 
782 static const struct kobj_type ip_hw_instance_ktype = {
783 	.release = ip_hw_instance_release,
784 	.sysfs_ops = &ip_hw_instance_sysfs_ops,
785 	.default_groups = ip_hw_instance_groups,
786 };
787 
788 /* -------------------------------------------------- */
789 
790 #define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
791 
792 static void ip_hw_id_release(struct kobject *kobj)
793 {
794 	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
795 
796 	if (!list_empty(&ip_hw_id->hw_id_kset.list))
797 		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
798 	kfree(ip_hw_id);
799 }
800 
801 static const struct kobj_type ip_hw_id_ktype = {
802 	.release = ip_hw_id_release,
803 	.sysfs_ops = &kobj_sysfs_ops,
804 };
805 
806 /* -------------------------------------------------- */
807 
808 static void die_kobj_release(struct kobject *kobj);
809 static void ip_disc_release(struct kobject *kobj);
810 
811 struct ip_die_entry_attribute {
812 	struct attribute attr;
813 	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
814 };
815 
816 #define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)
817 
818 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
819 {
820 	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
821 }
822 
823 /* If there are more ip_die_entry attrs other than the number of IPs,
824  * we can make this into an array of attrs and then initialize
825  * ip_die_entry_attrs in a loop.
826  */
827 static struct ip_die_entry_attribute num_ips_attr =
828 	__ATTR_RO(num_ips);
829 
830 static struct attribute *ip_die_entry_attrs[] = {
831 	&num_ips_attr.attr,
832 	NULL,
833 };
834 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
835 
836 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
837 
838 static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
839 				      struct attribute *attr,
840 				      char *buf)
841 {
842 	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
843 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
844 
845 	if (!ip_die_entry_attr->show)
846 		return -EIO;
847 
848 	return ip_die_entry_attr->show(ip_die_entry, buf);
849 }
850 
851 static void ip_die_entry_release(struct kobject *kobj)
852 {
853 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
854 
855 	if (!list_empty(&ip_die_entry->ip_kset.list))
856 		DRM_ERROR("ip_die_entry->ip_kset is not empty");
857 	kfree(ip_die_entry);
858 }
859 
860 static const struct sysfs_ops ip_die_entry_sysfs_ops = {
861 	.show = ip_die_entry_attr_show,
862 };
863 
864 static const struct kobj_type ip_die_entry_ktype = {
865 	.release = ip_die_entry_release,
866 	.sysfs_ops = &ip_die_entry_sysfs_ops,
867 	.default_groups = ip_die_entry_groups,
868 };
869 
870 static const struct kobj_type die_kobj_ktype = {
871 	.release = die_kobj_release,
872 	.sysfs_ops = &kobj_sysfs_ops,
873 };
874 
875 static const struct kobj_type ip_discovery_ktype = {
876 	.release = ip_disc_release,
877 	.sysfs_ops = &kobj_sysfs_ops,
878 };
879 
880 struct ip_discovery_top {
881 	struct kobject kobj;    /* ip_discovery/ */
882 	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
883 	struct amdgpu_device *adev;
884 };
885 
886 static void die_kobj_release(struct kobject *kobj)
887 {
888 	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
889 						       struct ip_discovery_top,
890 						       die_kset);
891 	if (!list_empty(&ip_top->die_kset.list))
892 		DRM_ERROR("ip_top->die_kset is not empty");
893 }
894 
895 static void ip_disc_release(struct kobject *kobj)
896 {
897 	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
898 						       kobj);
899 	struct amdgpu_device *adev = ip_top->adev;
900 
901 	adev->ip_top = NULL;
902 	kfree(ip_top);
903 }
904 
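/* Report the harvest status of a single hw instance for the sysfs "harvest"
 * attribute: returns 1 when the instance is missing from the mask built up
 * during harvest parsing (VCN/GC/SDMA) or when the whole DMU block is
 * harvested.
 */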
905 static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
906 						 uint16_t hw_id, uint8_t inst)
907 {
908 	uint8_t harvest = 0;
909 
910 	/* Until a uniform way is figured out, get the mask based on hwid */
911 	switch (hw_id) {
912 	case VCN_HWID:
913 		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
914 		break;
915 	case DMU_HWID:
916 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
917 			harvest = 0x1;
918 		break;
919 	case UMC_HWID:
920 		/* TODO: This needs separate parsing; for now, ignore. */
921 		break;
922 	case GC_HWID:
923 		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
924 		break;
925 	case SDMA0_HWID:
926 		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
927 		break;
928 	default:
929 		break;
930 	}
931 
932 	return harvest;
933 }
934 
935 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
936 				      struct ip_die_entry *ip_die_entry,
937 				      const size_t _ip_offset, const int num_ips,
938 				      bool reg_base_64)
939 {
940 	int ii, jj, kk, res;
941 
942 	DRM_DEBUG("num_ips:%d", num_ips);
943 
944 	/* Find all IPs of a given HW ID, and add their instance to
945 	 * #die/#hw_id/#instance/<attributes>
946 	 */
947 	for (ii = 0; ii < HW_ID_MAX; ii++) {
948 		struct ip_hw_id *ip_hw_id = NULL;
949 		size_t ip_offset = _ip_offset;
950 
951 		for (jj = 0; jj < num_ips; jj++) {
952 			struct ip_v4 *ip;
953 			struct ip_hw_instance *ip_hw_instance;
954 
955 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
956 			if (amdgpu_discovery_validate_ip(ip) ||
957 			    le16_to_cpu(ip->hw_id) != ii)
958 				goto next_ip;
959 
960 			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
961 
962 			/* We have a hw_id match; register the hw
963 			 * block if not yet registered.
964 			 */
965 			if (!ip_hw_id) {
966 				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
967 				if (!ip_hw_id)
968 					return -ENOMEM;
969 				ip_hw_id->hw_id = ii;
970 
971 				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
972 				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
973 				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
974 				res = kset_register(&ip_hw_id->hw_id_kset);
975 				if (res) {
976 					DRM_ERROR("Couldn't register ip_hw_id kset");
977 					kfree(ip_hw_id);
978 					return res;
979 				}
980 				if (hw_id_names[ii]) {
981 					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
982 								&ip_hw_id->hw_id_kset.kobj,
983 								hw_id_names[ii]);
984 					if (res) {
985 						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
986 							  hw_id_names[ii],
987 							  kobject_name(&ip_die_entry->ip_kset.kobj));
988 					}
989 				}
990 			}
991 
992 			/* Now register its instance.
993 			 */
994 			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
995 							     base_addr,
996 							     ip->num_base_address),
997 						 GFP_KERNEL);
998 			if (!ip_hw_instance) {
999 				DRM_ERROR("no memory for ip_hw_instance");
1000 				return -ENOMEM;
1001 			}
1002 			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1003 			ip_hw_instance->num_instance = ip->instance_number;
1004 			ip_hw_instance->major = ip->major;
1005 			ip_hw_instance->minor = ip->minor;
1006 			ip_hw_instance->revision = ip->revision;
1007 			ip_hw_instance->harvest =
1008 				amdgpu_discovery_get_harvest_info(
1009 					adev, ip_hw_instance->hw_id,
1010 					ip_hw_instance->num_instance);
1011 			ip_hw_instance->num_base_addresses = ip->num_base_address;
1012 
1013 			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1014 				if (reg_base_64)
1015 					ip_hw_instance->base_addr[kk] =
1016 						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1017 				else
1018 					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1019 			}
1020 
1021 			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1022 			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1023 			res = kobject_add(&ip_hw_instance->kobj, NULL,
1024 					  "%d", ip_hw_instance->num_instance);
1025 next_ip:
1026 			if (reg_base_64)
1027 				ip_offset += struct_size(ip, base_address_64,
1028 							 ip->num_base_address);
1029 			else
1030 				ip_offset += struct_size(ip, base_address,
1031 							 ip->num_base_address);
1032 		}
1033 	}
1034 
1035 	return 0;
1036 }
1037 
1038 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1039 {
1040 	struct binary_header *bhdr;
1041 	struct ip_discovery_header *ihdr;
1042 	struct die_header *dhdr;
1043 	struct kset *die_kset = &adev->ip_top->die_kset;
1044 	u16 num_dies, die_offset, num_ips;
1045 	size_t ip_offset;
1046 	int ii, res;
1047 
1048 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1049 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1050 					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1051 	num_dies = le16_to_cpu(ihdr->num_dies);
1052 
1053 	DRM_DEBUG("number of dies: %d\n", num_dies);
1054 
1055 	for (ii = 0; ii < num_dies; ii++) {
1056 		struct ip_die_entry *ip_die_entry;
1057 
1058 		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1059 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1060 		num_ips = le16_to_cpu(dhdr->num_ips);
1061 		ip_offset = die_offset + sizeof(*dhdr);
1062 
1063 		/* Add the die to the kset.
1064 		 *
1065 		 * dhdr->die_id == ii, which was checked in
1066 		 * amdgpu_discovery_reg_base_init().
1067 		 */
1068 
1069 		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1070 		if (!ip_die_entry)
1071 			return -ENOMEM;
1072 
1073 		ip_die_entry->num_ips = num_ips;
1074 
1075 		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1076 		ip_die_entry->ip_kset.kobj.kset = die_kset;
1077 		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1078 		res = kset_register(&ip_die_entry->ip_kset);
1079 		if (res) {
1080 			DRM_ERROR("Couldn't register ip_die_entry kset");
1081 			kfree(ip_die_entry);
1082 			return res;
1083 		}
1084 
1085 		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1086 	}
1087 
1088 	return 0;
1089 }
1090 
1091 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1092 {
1093 	struct kset *die_kset;
1094 	int res, ii;
1095 
1096 	if (!adev->mman.discovery_bin)
1097 		return -EINVAL;
1098 
1099 	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1100 	if (!adev->ip_top)
1101 		return -ENOMEM;
1102 
1103 	adev->ip_top->adev = adev;
1104 
1105 	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1106 				   &adev->dev->kobj, "ip_discovery");
1107 	if (res) {
1108 		DRM_ERROR("Couldn't init and add ip_discovery/");
1109 		goto Err;
1110 	}
1111 
1112 	die_kset = &adev->ip_top->die_kset;
1113 	kobject_set_name(&die_kset->kobj, "%s", "die");
1114 	die_kset->kobj.parent = &adev->ip_top->kobj;
1115 	die_kset->kobj.ktype = &die_kobj_ktype;
1116 	res = kset_register(&adev->ip_top->die_kset);
1117 	if (res) {
1118 		DRM_ERROR("Couldn't register die_kset");
1119 		goto Err;
1120 	}
1121 
1122 	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1123 		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1124 	ip_hw_instance_attrs[ii] = NULL;
1125 
1126 	res = amdgpu_discovery_sysfs_recurse(adev);
1127 
1128 	return res;
1129 Err:
1130 	kobject_put(&adev->ip_top->kobj);
1131 	return res;
1132 }
1133 
1134 /* -------------------------------------------------- */
1135 
1136 #define list_to_kobj(el) container_of(el, struct kobject, entry)
1137 
1138 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1139 {
1140 	struct list_head *el, *tmp;
1141 	struct kset *hw_id_kset;
1142 
1143 	hw_id_kset = &ip_hw_id->hw_id_kset;
1144 	spin_lock(&hw_id_kset->list_lock);
1145 	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1146 		list_del_init(el);
1147 		spin_unlock(&hw_id_kset->list_lock);
1148 		/* kobject is embedded in ip_hw_instance */
1149 		kobject_put(list_to_kobj(el));
1150 		spin_lock(&hw_id_kset->list_lock);
1151 	}
1152 	spin_unlock(&hw_id_kset->list_lock);
1153 	kobject_put(&ip_hw_id->hw_id_kset.kobj);
1154 }
1155 
1156 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1157 {
1158 	struct list_head *el, *tmp;
1159 	struct kset *ip_kset;
1160 
1161 	ip_kset = &ip_die_entry->ip_kset;
1162 	spin_lock(&ip_kset->list_lock);
1163 	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1164 		list_del_init(el);
1165 		spin_unlock(&ip_kset->list_lock);
1166 		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1167 		spin_lock(&ip_kset->list_lock);
1168 	}
1169 	spin_unlock(&ip_kset->list_lock);
1170 	kobject_put(&ip_die_entry->ip_kset.kobj);
1171 }
1172 
1173 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1174 {
1175 	struct list_head *el, *tmp;
1176 	struct kset *die_kset;
1177 
1178 	die_kset = &adev->ip_top->die_kset;
1179 	spin_lock(&die_kset->list_lock);
1180 	list_for_each_prev_safe(el, tmp, &die_kset->list) {
1181 		list_del_init(el);
1182 		spin_unlock(&die_kset->list_lock);
1183 		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1184 		spin_lock(&die_kset->list_lock);
1185 	}
1186 	spin_unlock(&die_kset->list_lock);
1187 	kobject_put(&adev->ip_top->die_kset.kobj);
1188 	kobject_put(&adev->ip_top->kobj);
1189 }
1190 
1191 /* ================================================== */
1192 
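/* Walk every die and IP entry of the discovery table: fix up base-address
 * endianness in place, record per-instance register bases in
 * adev->reg_offset[hwip][instance], and pack major/minor/revision (plus
 * variant/sub-revision on newer tables) into adev->ip_versions[], which is
 * what amdgpu_ip_version() reads later.  GC/SDMA/VCN/JPEG instance masks and
 * UMC counts are collected along the way.
 */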
1193 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1194 {
1195 	uint8_t num_base_address, subrev, variant;
1196 	struct binary_header *bhdr;
1197 	struct ip_discovery_header *ihdr;
1198 	struct die_header *dhdr;
1199 	struct ip_v4 *ip;
1200 	uint16_t die_offset;
1201 	uint16_t ip_offset;
1202 	uint16_t num_dies;
1203 	uint16_t num_ips;
1204 	int hw_ip;
1205 	int i, j, k;
1206 	int r;
1207 
1208 	r = amdgpu_discovery_init(adev);
1209 	if (r) {
1210 		DRM_ERROR("amdgpu_discovery_init failed\n");
1211 		return r;
1212 	}
1213 
1214 	adev->gfx.xcc_mask = 0;
1215 	adev->sdma.sdma_mask = 0;
1216 	adev->vcn.inst_mask = 0;
1217 	adev->jpeg.inst_mask = 0;
1218 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1219 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1220 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1221 	num_dies = le16_to_cpu(ihdr->num_dies);
1222 
1223 	DRM_DEBUG("number of dies: %d\n", num_dies);
1224 
1225 	for (i = 0; i < num_dies; i++) {
1226 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1227 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1228 		num_ips = le16_to_cpu(dhdr->num_ips);
1229 		ip_offset = die_offset + sizeof(*dhdr);
1230 
1231 		if (le16_to_cpu(dhdr->die_id) != i) {
1232 			DRM_ERROR("invalid die id %d, expected %d\n",
1233 					le16_to_cpu(dhdr->die_id), i);
1234 			return -EINVAL;
1235 		}
1236 
1237 		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1238 				le16_to_cpu(dhdr->die_id), num_ips);
1239 
1240 		for (j = 0; j < num_ips; j++) {
1241 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1242 
1243 			if (amdgpu_discovery_validate_ip(ip))
1244 				goto next_ip;
1245 
1246 			num_base_address = ip->num_base_address;
1247 
1248 			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1249 				  hw_id_names[le16_to_cpu(ip->hw_id)],
1250 				  le16_to_cpu(ip->hw_id),
1251 				  ip->instance_number,
1252 				  ip->major, ip->minor,
1253 				  ip->revision);
1254 
1255 			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1256 				/* Bit [5:0]: original revision value
1257 				 * Bit [7:6]: en/decode capability:
1258 				 *     0b00 : VCN function normally
1259 				 *     0b10 : encode is disabled
1260 				 *     0b01 : decode is disabled
1261 				 */
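				/* e.g. (illustrative) a raw revision of 0x82 is
				 * original revision 2 with encode disabled
				 * (bits [7:6] == 0b10).
				 */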
1262 				adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1263 					ip->revision & 0xc0;
1264 				ip->revision &= ~0xc0;
1265 				if (adev->vcn.num_vcn_inst <
1266 				    AMDGPU_MAX_VCN_INSTANCES) {
1267 					adev->vcn.num_vcn_inst++;
1268 					adev->vcn.inst_mask |=
1269 						(1U << ip->instance_number);
1270 					adev->jpeg.inst_mask |=
1271 						(1U << ip->instance_number);
1272 				} else {
1273 					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1274 						adev->vcn.num_vcn_inst + 1,
1275 						AMDGPU_MAX_VCN_INSTANCES);
1276 				}
1277 			}
1278 			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1279 			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1280 			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1281 			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1282 				if (adev->sdma.num_instances <
1283 				    AMDGPU_MAX_SDMA_INSTANCES) {
1284 					adev->sdma.num_instances++;
1285 					adev->sdma.sdma_mask |=
1286 						(1U << ip->instance_number);
1287 				} else {
1288 					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1289 						adev->sdma.num_instances + 1,
1290 						AMDGPU_MAX_SDMA_INSTANCES);
1291 				}
1292 			}
1293 
1294 			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1295 				adev->gmc.num_umc++;
1296 				adev->umc.node_inst_num++;
1297 			}
1298 
1299 			if (le16_to_cpu(ip->hw_id) == GC_HWID)
1300 				adev->gfx.xcc_mask |=
1301 					(1U << ip->instance_number);
1302 
1303 			for (k = 0; k < num_base_address; k++) {
1304 				/*
1305 				 * convert the endianness of base addresses in place,
1306 				 * so that we don't need to convert them when accessing adev->reg_offset.
1307 				 */
1308 				if (ihdr->base_addr_64_bit)
1309 					/* Truncate the 64bit base address from ip discovery
1310 					 * and only store lower 32bit ip base in reg_offset[].
1311 					 * Bits above 32 follow an ASIC-specific format, so just
1312 					 * discard them here and handle them in ASIC-specific code.
1313 					 * This way reg_offset[] and related helpers can
1314 					 * stay unchanged.
1315 					 * The base address is in dwords, so clear the
1316 					 * highest 2 bits before storing it.
1317 					 */
1318 					ip->base_address[k] =
1319 						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1320 				else
1321 					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1322 				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1323 			}
1324 
1325 			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1326 				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1327 				    hw_id_map[hw_ip] != 0) {
1328 					DRM_DEBUG("set register base offset for %s\n",
1329 							hw_id_names[le16_to_cpu(ip->hw_id)]);
1330 					adev->reg_offset[hw_ip][ip->instance_number] =
1331 						ip->base_address;
1332 					/* Instance support is somewhat inconsistent.
1333 					 * SDMA is a good example.  Sienna cichlid has 4 total
1334 					 * SDMA instances, each enumerated separately (HWIDs
1335 					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
1336 					 * but they are enumerated as multiple instances of the
1337 					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
1338 					 * example.  On most chips there are multiple instances
1339 					 * with the same HWID.
1340 					 */
1341 
1342 					if (ihdr->version < 3) {
1343 						subrev = 0;
1344 						variant = 0;
1345 					} else {
1346 						subrev = ip->sub_revision;
1347 						variant = ip->variant;
1348 					}
1349 
1350 					adev->ip_versions[hw_ip]
1351 							 [ip->instance_number] =
1352 						IP_VERSION_FULL(ip->major,
1353 								ip->minor,
1354 								ip->revision,
1355 								variant,
1356 								subrev);
1357 				}
1358 			}
1359 
1360 next_ip:
1361 			if (ihdr->base_addr_64_bit)
1362 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1363 			else
1364 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
1365 		}
1366 	}
1367 
1368 	return 0;
1369 }
1370 
1371 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1372 {
1373 	int vcn_harvest_count = 0;
1374 	int umc_harvest_count = 0;
1375 
1376 	/*
1377 	 * The harvest table does not cover Navi1x and legacy GPUs,
1378 	 * so read the harvest bit of each IP data structure to set
1379 	 * the harvest configuration.
1380 	 */
1381 	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1382 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) {
1383 		if ((adev->pdev->device == 0x731E &&
1384 			(adev->pdev->revision == 0xC6 ||
1385 			 adev->pdev->revision == 0xC7)) ||
1386 			(adev->pdev->device == 0x7340 &&
1387 			 adev->pdev->revision == 0xC9) ||
1388 			(adev->pdev->device == 0x7360 &&
1389 			 adev->pdev->revision == 0xC7))
1390 			amdgpu_discovery_read_harvest_bit_per_ip(adev,
1391 				&vcn_harvest_count);
1392 	} else {
1393 		amdgpu_discovery_read_from_harvest_table(adev,
1394 							 &vcn_harvest_count,
1395 							 &umc_harvest_count);
1396 	}
1397 
1398 	amdgpu_discovery_harvest_config_quirk(adev);
1399 
1400 	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1401 		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1402 		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1403 	}
1404 
1405 	if (umc_harvest_count < adev->gmc.num_umc) {
1406 		adev->gmc.num_umc -= umc_harvest_count;
1407 	}
1408 }
1409 
1410 union gc_info {
1411 	struct gc_info_v1_0 v1;
1412 	struct gc_info_v1_1 v1_1;
1413 	struct gc_info_v1_2 v1_2;
1414 	struct gc_info_v2_0 v2;
1415 	struct gc_info_v2_1 v2_1;
1416 };
1417 
1418 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1419 {
1420 	struct binary_header *bhdr;
1421 	union gc_info *gc_info;
1422 	u16 offset;
1423 
1424 	if (!adev->mman.discovery_bin) {
1425 		DRM_ERROR("ip discovery uninitialized\n");
1426 		return -EINVAL;
1427 	}
1428 
1429 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1430 	offset = le16_to_cpu(bhdr->table_list[GC].offset);
1431 
1432 	if (!offset)
1433 		return 0;
1434 
1435 	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1436 
1437 	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1438 	case 1:
1439 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1440 		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1441 						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1442 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1443 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1444 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1445 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1446 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1447 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1448 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1449 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1450 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1451 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1452 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1453 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1454 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1455 			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1456 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1457 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1458 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1459 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1460 			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1461 		}
1462 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1463 			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1464 			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1465 			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1466 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1467 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1468 			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1469 			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1470 			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1471 		}
1472 		break;
1473 	case 2:
1474 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1475 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1476 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1477 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1478 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1479 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1480 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1481 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1482 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1483 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1484 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1485 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1486 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1487 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1488 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1489 			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1490 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1491 		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1492 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1493 			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1494 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1495 			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1496 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1497 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1498 			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1499 		}
1500 		break;
1501 	default:
1502 		dev_err(adev->dev,
1503 			"Unhandled GC info table %d.%d\n",
1504 			le16_to_cpu(gc_info->v1.header.version_major),
1505 			le16_to_cpu(gc_info->v1.header.version_minor));
1506 		return -EINVAL;
1507 	}
1508 	return 0;
1509 }
1510 
1511 union mall_info {
1512 	struct mall_info_v1_0 v1;
1513 	struct mall_info_v2_0 v2;
1514 };
1515 
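/* Compute the total MALL (memory attached last level) cache size.  v1
 * reports a per-UMC size plus bitmaps: instances flagged in m_s_present
 * count double, instances flagged in m_half_use count half.  With
 * illustrative numbers - 4 UMCs of 2 MiB each and only the half_use bit for
 * UMC0 set - the total is 1 + 2 + 2 + 2 = 7 MiB.  v2 simply multiplies the
 * per-UMC size by the number of UMCs.
 */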
1516 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1517 {
1518 	struct binary_header *bhdr;
1519 	union mall_info *mall_info;
1520 	u32 u, mall_size_per_umc, m_s_present, half_use;
1521 	u64 mall_size;
1522 	u16 offset;
1523 
1524 	if (!adev->mman.discovery_bin) {
1525 		DRM_ERROR("ip discovery uninitialized\n");
1526 		return -EINVAL;
1527 	}
1528 
1529 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1530 	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1531 
1532 	if (!offset)
1533 		return 0;
1534 
1535 	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1536 
1537 	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1538 	case 1:
1539 		mall_size = 0;
1540 		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1541 		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1542 		half_use = le32_to_cpu(mall_info->v1.m_half_use);
1543 		for (u = 0; u < adev->gmc.num_umc; u++) {
1544 			if (m_s_present & (1 << u))
1545 				mall_size += mall_size_per_umc * 2;
1546 			else if (half_use & (1 << u))
1547 				mall_size += mall_size_per_umc / 2;
1548 			else
1549 				mall_size += mall_size_per_umc;
1550 		}
1551 		adev->gmc.mall_size = mall_size;
1552 		adev->gmc.m_half_use = half_use;
1553 		break;
1554 	case 2:
1555 		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1556 		adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
1557 		break;
1558 	default:
1559 		dev_err(adev->dev,
1560 			"Unhandled MALL info table %d.%d\n",
1561 			le16_to_cpu(mall_info->v1.header.version_major),
1562 			le16_to_cpu(mall_info->v1.header.version_minor));
1563 		return -EINVAL;
1564 	}
1565 	return 0;
1566 }
1567 
1568 union vcn_info {
1569 	struct vcn_info_v1_0 v1;
1570 };
1571 
1572 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1573 {
1574 	struct binary_header *bhdr;
1575 	union vcn_info *vcn_info;
1576 	u16 offset;
1577 	int v;
1578 
1579 	if (!adev->mman.discovery_bin) {
1580 		DRM_ERROR("ip discovery uninitialized\n");
1581 		return -EINVAL;
1582 	}
1583 
1584 	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES,
1585 	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES,
1586 	 * but that may change in the future with new GPUs, so keep this
1587 	 * check for defensive purposes.
1588 	 */
1589 	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1590 		dev_err(adev->dev, "invalid vcn instances\n");
1591 		return -EINVAL;
1592 	}
1593 
1594 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1595 	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1596 
1597 	if (!offset)
1598 		return 0;
1599 
1600 	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1601 
1602 	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1603 	case 1:
1604 		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1605 		 * so this won't overflow.
1606 		 */
1607 		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1608 			adev->vcn.vcn_codec_disable_mask[v] =
1609 				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1610 		}
1611 		break;
1612 	default:
1613 		dev_err(adev->dev,
1614 			"Unhandled VCN info table %d.%d\n",
1615 			le16_to_cpu(vcn_info->v1.header.version_major),
1616 			le16_to_cpu(vcn_info->v1.header.version_minor));
1617 		return -EINVAL;
1618 	}
1619 	return 0;
1620 }
1621 
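/* The helpers below translate the discovered IP versions into the driver's
 * ip_block list.  The soc/common block has no discovery entry of its own,
 * so the GC version is used as a stand-in (see the "what IP to use for
 * this?" note below).
 */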
1622 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1623 {
1624 	/* what IP to use for this? */
1625 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1626 	case IP_VERSION(9, 0, 1):
1627 	case IP_VERSION(9, 1, 0):
1628 	case IP_VERSION(9, 2, 1):
1629 	case IP_VERSION(9, 2, 2):
1630 	case IP_VERSION(9, 3, 0):
1631 	case IP_VERSION(9, 4, 0):
1632 	case IP_VERSION(9, 4, 1):
1633 	case IP_VERSION(9, 4, 2):
1634 	case IP_VERSION(9, 4, 3):
1635 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1636 		break;
1637 	case IP_VERSION(10, 1, 10):
1638 	case IP_VERSION(10, 1, 1):
1639 	case IP_VERSION(10, 1, 2):
1640 	case IP_VERSION(10, 1, 3):
1641 	case IP_VERSION(10, 1, 4):
1642 	case IP_VERSION(10, 3, 0):
1643 	case IP_VERSION(10, 3, 1):
1644 	case IP_VERSION(10, 3, 2):
1645 	case IP_VERSION(10, 3, 3):
1646 	case IP_VERSION(10, 3, 4):
1647 	case IP_VERSION(10, 3, 5):
1648 	case IP_VERSION(10, 3, 6):
1649 	case IP_VERSION(10, 3, 7):
1650 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1651 		break;
1652 	case IP_VERSION(11, 0, 0):
1653 	case IP_VERSION(11, 0, 1):
1654 	case IP_VERSION(11, 0, 2):
1655 	case IP_VERSION(11, 0, 3):
1656 	case IP_VERSION(11, 0, 4):
1657 	case IP_VERSION(11, 5, 0):
1658 		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1659 		break;
1660 	default:
1661 		dev_err(adev->dev,
1662 			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1663 			amdgpu_ip_version(adev, GC_HWIP, 0));
1664 		return -EINVAL;
1665 	}
1666 	return 0;
1667 }
1668 
1669 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1670 {
1671 	/* use GC or MMHUB IP version */
1672 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1673 	case IP_VERSION(9, 0, 1):
1674 	case IP_VERSION(9, 1, 0):
1675 	case IP_VERSION(9, 2, 1):
1676 	case IP_VERSION(9, 2, 2):
1677 	case IP_VERSION(9, 3, 0):
1678 	case IP_VERSION(9, 4, 0):
1679 	case IP_VERSION(9, 4, 1):
1680 	case IP_VERSION(9, 4, 2):
1681 	case IP_VERSION(9, 4, 3):
1682 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1683 		break;
1684 	case IP_VERSION(10, 1, 10):
1685 	case IP_VERSION(10, 1, 1):
1686 	case IP_VERSION(10, 1, 2):
1687 	case IP_VERSION(10, 1, 3):
1688 	case IP_VERSION(10, 1, 4):
1689 	case IP_VERSION(10, 3, 0):
1690 	case IP_VERSION(10, 3, 1):
1691 	case IP_VERSION(10, 3, 2):
1692 	case IP_VERSION(10, 3, 3):
1693 	case IP_VERSION(10, 3, 4):
1694 	case IP_VERSION(10, 3, 5):
1695 	case IP_VERSION(10, 3, 6):
1696 	case IP_VERSION(10, 3, 7):
1697 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1698 		break;
1699 	case IP_VERSION(11, 0, 0):
1700 	case IP_VERSION(11, 0, 1):
1701 	case IP_VERSION(11, 0, 2):
1702 	case IP_VERSION(11, 0, 3):
1703 	case IP_VERSION(11, 0, 4):
1704 	case IP_VERSION(11, 5, 0):
1705 		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1706 		break;
1707 	default:
1708 		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1709 			amdgpu_ip_version(adev, GC_HWIP, 0));
1710 		return -EINVAL;
1711 	}
1712 	return 0;
1713 }
1714 
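/* Add the IH (interrupt handler) IP block based on the OSSSYS IP version. */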
1715 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1716 {
1717 	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
1718 	case IP_VERSION(4, 0, 0):
1719 	case IP_VERSION(4, 0, 1):
1720 	case IP_VERSION(4, 1, 0):
1721 	case IP_VERSION(4, 1, 1):
1722 	case IP_VERSION(4, 3, 0):
1723 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1724 		break;
1725 	case IP_VERSION(4, 2, 0):
1726 	case IP_VERSION(4, 2, 1):
1727 	case IP_VERSION(4, 4, 0):
1728 	case IP_VERSION(4, 4, 2):
1729 		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1730 		break;
1731 	case IP_VERSION(5, 0, 0):
1732 	case IP_VERSION(5, 0, 1):
1733 	case IP_VERSION(5, 0, 2):
1734 	case IP_VERSION(5, 0, 3):
1735 	case IP_VERSION(5, 2, 0):
1736 	case IP_VERSION(5, 2, 1):
1737 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1738 		break;
1739 	case IP_VERSION(6, 0, 0):
1740 	case IP_VERSION(6, 0, 1):
1741 	case IP_VERSION(6, 0, 2):
1742 		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1743 		break;
1744 	case IP_VERSION(6, 1, 0):
1745 		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1746 		break;
1747 	default:
1748 		dev_err(adev->dev,
1749 			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1750 			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
1751 		return -EINVAL;
1752 	}
1753 	return 0;
1754 }
1755 
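/* Add the PSP (platform security processor) IP block based on the MP0 IP version. */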
1756 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1757 {
1758 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1759 	case IP_VERSION(9, 0, 0):
1760 		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1761 		break;
1762 	case IP_VERSION(10, 0, 0):
1763 	case IP_VERSION(10, 0, 1):
1764 		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1765 		break;
1766 	case IP_VERSION(11, 0, 0):
1767 	case IP_VERSION(11, 0, 2):
1768 	case IP_VERSION(11, 0, 4):
1769 	case IP_VERSION(11, 0, 5):
1770 	case IP_VERSION(11, 0, 9):
1771 	case IP_VERSION(11, 0, 7):
1772 	case IP_VERSION(11, 0, 11):
1773 	case IP_VERSION(11, 0, 12):
1774 	case IP_VERSION(11, 0, 13):
1775 	case IP_VERSION(11, 5, 0):
1776 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1777 		break;
1778 	case IP_VERSION(11, 0, 8):
1779 		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
1780 		break;
1781 	case IP_VERSION(11, 0, 3):
1782 	case IP_VERSION(12, 0, 1):
1783 		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
1784 		break;
1785 	case IP_VERSION(13, 0, 0):
1786 	case IP_VERSION(13, 0, 1):
1787 	case IP_VERSION(13, 0, 2):
1788 	case IP_VERSION(13, 0, 3):
1789 	case IP_VERSION(13, 0, 5):
1790 	case IP_VERSION(13, 0, 6):
1791 	case IP_VERSION(13, 0, 7):
1792 	case IP_VERSION(13, 0, 8):
1793 	case IP_VERSION(13, 0, 10):
1794 	case IP_VERSION(13, 0, 11):
1795 	case IP_VERSION(14, 0, 0):
1796 		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
1797 		break;
1798 	case IP_VERSION(13, 0, 4):
1799 		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
1800 		break;
1801 	default:
1802 		dev_err(adev->dev,
1803 			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
1804 			amdgpu_ip_version(adev, MP0_HWIP, 0));
1805 		return -EINVAL;
1806 	}
1807 	return 0;
1808 }
1809 
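/*
 * Add the SMU/powerplay IP block based on the MP1 IP version; Arcturus
 * shares MP1 11.0.2 with Vega20 but takes the swSMU (smu_v11_0) path.
 */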
1810 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
1811 {
1812 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1813 	case IP_VERSION(9, 0, 0):
1814 	case IP_VERSION(10, 0, 0):
1815 	case IP_VERSION(10, 0, 1):
1816 	case IP_VERSION(11, 0, 2):
1817 		if (adev->asic_type == CHIP_ARCTURUS)
1818 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1819 		else
1820 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1821 		break;
1822 	case IP_VERSION(11, 0, 0):
1823 	case IP_VERSION(11, 0, 5):
1824 	case IP_VERSION(11, 0, 9):
1825 	case IP_VERSION(11, 0, 7):
1826 	case IP_VERSION(11, 0, 8):
1827 	case IP_VERSION(11, 0, 11):
1828 	case IP_VERSION(11, 0, 12):
1829 	case IP_VERSION(11, 0, 13):
1830 	case IP_VERSION(11, 5, 0):
1831 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1832 		break;
1833 	case IP_VERSION(12, 0, 0):
1834 	case IP_VERSION(12, 0, 1):
1835 		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
1836 		break;
1837 	case IP_VERSION(13, 0, 0):
1838 	case IP_VERSION(13, 0, 1):
1839 	case IP_VERSION(13, 0, 2):
1840 	case IP_VERSION(13, 0, 3):
1841 	case IP_VERSION(13, 0, 4):
1842 	case IP_VERSION(13, 0, 5):
1843 	case IP_VERSION(13, 0, 6):
1844 	case IP_VERSION(13, 0, 7):
1845 	case IP_VERSION(13, 0, 8):
1846 	case IP_VERSION(13, 0, 10):
1847 	case IP_VERSION(13, 0, 11):
1848 		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
1849 		break;
1850 	case IP_VERSION(14, 0, 0):
1851 		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
1852 		break;
1853 	default:
1854 		dev_err(adev->dev,
1855 			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
1856 			amdgpu_ip_version(adev, MP1_HWIP, 0));
1857 		return -EINVAL;
1858 	}
1859 	return 0;
1860 }
1861 
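/*
 * Under SR-IOV the host owns the real display hardware, so the guest is
 * given a virtual KMS device instead of the DC display block.
 */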
1862 #if defined(CONFIG_DRM_AMD_DC)
1863 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
1864 {
1865 	amdgpu_device_set_sriov_virtual_display(adev);
1866 	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
1867 }
1868 #endif
1869 
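/*
 * Add the display IP block: virtual KMS when virtual display is requested
 * or under SR-IOV, otherwise DC (the dm block) keyed off the DCE/DCI IP
 * version, provided the kernel is built with CONFIG_DRM_AMD_DC.
 */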
1870 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
1871 {
1872 	if (adev->enable_virtual_display) {
1873 		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
1874 		return 0;
1875 	}
1876 
1877 	if (!amdgpu_device_has_dc_support(adev))
1878 		return 0;
1879 
1880 #if defined(CONFIG_DRM_AMD_DC)
1881 	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1882 		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1883 		case IP_VERSION(1, 0, 0):
1884 		case IP_VERSION(1, 0, 1):
1885 		case IP_VERSION(2, 0, 2):
1886 		case IP_VERSION(2, 0, 0):
1887 		case IP_VERSION(2, 0, 3):
1888 		case IP_VERSION(2, 1, 0):
1889 		case IP_VERSION(3, 0, 0):
1890 		case IP_VERSION(3, 0, 2):
1891 		case IP_VERSION(3, 0, 3):
1892 		case IP_VERSION(3, 0, 1):
1893 		case IP_VERSION(3, 1, 2):
1894 		case IP_VERSION(3, 1, 3):
1895 		case IP_VERSION(3, 1, 4):
1896 		case IP_VERSION(3, 1, 5):
1897 		case IP_VERSION(3, 1, 6):
1898 		case IP_VERSION(3, 2, 0):
1899 		case IP_VERSION(3, 2, 1):
1900 		case IP_VERSION(3, 5, 0):
1901 			if (amdgpu_sriov_vf(adev))
1902 				amdgpu_discovery_set_sriov_display(adev);
1903 			else
1904 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
1905 			break;
1906 		default:
1907 			dev_err(adev->dev,
1908 				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
1909 				amdgpu_ip_version(adev, DCE_HWIP, 0));
1910 			return -EINVAL;
1911 		}
1912 	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
1913 		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
1914 		case IP_VERSION(12, 0, 0):
1915 		case IP_VERSION(12, 0, 1):
1916 		case IP_VERSION(12, 1, 0):
1917 			if (amdgpu_sriov_vf(adev))
1918 				amdgpu_discovery_set_sriov_display(adev);
1919 			else
1920 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
1921 			break;
1922 		default:
1923 			dev_err(adev->dev,
1924 				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
1925 				amdgpu_ip_version(adev, DCI_HWIP, 0));
1926 			return -EINVAL;
1927 		}
1928 	}
1929 #endif
1930 	return 0;
1931 }
1932 
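/* Add the GFX (graphics/compute) IP block for the detected GC IP version. */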
1933 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
1934 {
1935 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1936 	case IP_VERSION(9, 0, 1):
1937 	case IP_VERSION(9, 1, 0):
1938 	case IP_VERSION(9, 2, 1):
1939 	case IP_VERSION(9, 2, 2):
1940 	case IP_VERSION(9, 3, 0):
1941 	case IP_VERSION(9, 4, 0):
1942 	case IP_VERSION(9, 4, 1):
1943 	case IP_VERSION(9, 4, 2):
1944 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
1945 		break;
1946 	case IP_VERSION(9, 4, 3):
1947 		if (!amdgpu_exp_hw_support)
1948 			return -EINVAL;
1949 		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
1950 		break;
1951 	case IP_VERSION(10, 1, 10):
1952 	case IP_VERSION(10, 1, 2):
1953 	case IP_VERSION(10, 1, 1):
1954 	case IP_VERSION(10, 1, 3):
1955 	case IP_VERSION(10, 1, 4):
1956 	case IP_VERSION(10, 3, 0):
1957 	case IP_VERSION(10, 3, 2):
1958 	case IP_VERSION(10, 3, 1):
1959 	case IP_VERSION(10, 3, 4):
1960 	case IP_VERSION(10, 3, 5):
1961 	case IP_VERSION(10, 3, 6):
1962 	case IP_VERSION(10, 3, 3):
1963 	case IP_VERSION(10, 3, 7):
1964 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
1965 		break;
1966 	case IP_VERSION(11, 0, 0):
1967 	case IP_VERSION(11, 0, 1):
1968 	case IP_VERSION(11, 0, 2):
1969 	case IP_VERSION(11, 0, 3):
1970 	case IP_VERSION(11, 0, 4):
1971 	case IP_VERSION(11, 5, 0):
1972 		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
1973 		break;
1974 	default:
1975 		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
1976 			amdgpu_ip_version(adev, GC_HWIP, 0));
1977 		return -EINVAL;
1978 	}
1979 	return 0;
1980 }
1981 
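/* Add the SDMA (system DMA) IP block based on the SDMA0 IP version. */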
1982 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
1983 {
1984 	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1985 	case IP_VERSION(4, 0, 0):
1986 	case IP_VERSION(4, 0, 1):
1987 	case IP_VERSION(4, 1, 0):
1988 	case IP_VERSION(4, 1, 1):
1989 	case IP_VERSION(4, 1, 2):
1990 	case IP_VERSION(4, 2, 0):
1991 	case IP_VERSION(4, 2, 2):
1992 	case IP_VERSION(4, 4, 0):
1993 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
1994 		break;
1995 	case IP_VERSION(4, 4, 2):
1996 		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
1997 		break;
1998 	case IP_VERSION(5, 0, 0):
1999 	case IP_VERSION(5, 0, 1):
2000 	case IP_VERSION(5, 0, 2):
2001 	case IP_VERSION(5, 0, 5):
2002 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2003 		break;
2004 	case IP_VERSION(5, 2, 0):
2005 	case IP_VERSION(5, 2, 2):
2006 	case IP_VERSION(5, 2, 4):
2007 	case IP_VERSION(5, 2, 5):
2008 	case IP_VERSION(5, 2, 6):
2009 	case IP_VERSION(5, 2, 3):
2010 	case IP_VERSION(5, 2, 1):
2011 	case IP_VERSION(5, 2, 7):
2012 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2013 		break;
2014 	case IP_VERSION(6, 0, 0):
2015 	case IP_VERSION(6, 0, 1):
2016 	case IP_VERSION(6, 0, 2):
2017 	case IP_VERSION(6, 0, 3):
2018 	case IP_VERSION(6, 1, 0):
2019 		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2020 		break;
2021 	default:
2022 		dev_err(adev->dev,
2023 			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2024 			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2025 		return -EINVAL;
2026 	}
2027 	return 0;
2028 }
2029 
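/*
 * Add the multimedia IP blocks: UVD/VCE on parts that still report a VCE
 * IP version, VCN (plus JPEG where applicable) on everything newer.
 */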
2030 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2031 {
2032 	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2033 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2034 		case IP_VERSION(7, 0, 0):
2035 		case IP_VERSION(7, 2, 0):
2036 			/* UVD is not supported on vega20 SR-IOV */
2037 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2038 				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2039 			break;
2040 		default:
2041 			dev_err(adev->dev,
2042 				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2043 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2044 			return -EINVAL;
2045 		}
2046 		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2047 		case IP_VERSION(4, 0, 0):
2048 		case IP_VERSION(4, 1, 0):
2049 			/* VCE is not supported on vega20 SR-IOV */
2050 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2051 				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2052 			break;
2053 		default:
2054 			dev_err(adev->dev,
2055 				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2056 				amdgpu_ip_version(adev, VCE_HWIP, 0));
2057 			return -EINVAL;
2058 		}
2059 	} else {
2060 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2061 		case IP_VERSION(1, 0, 0):
2062 		case IP_VERSION(1, 0, 1):
2063 			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2064 			break;
2065 		case IP_VERSION(2, 0, 0):
2066 		case IP_VERSION(2, 0, 2):
2067 		case IP_VERSION(2, 2, 0):
2068 			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2069 			if (!amdgpu_sriov_vf(adev))
2070 				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2071 			break;
2072 		case IP_VERSION(2, 0, 3):
2073 			break;
2074 		case IP_VERSION(2, 5, 0):
2075 			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2076 			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2077 			break;
2078 		case IP_VERSION(2, 6, 0):
2079 			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2080 			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2081 			break;
2082 		case IP_VERSION(3, 0, 0):
2083 		case IP_VERSION(3, 0, 16):
2084 		case IP_VERSION(3, 1, 1):
2085 		case IP_VERSION(3, 1, 2):
2086 		case IP_VERSION(3, 0, 2):
2087 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2088 			if (!amdgpu_sriov_vf(adev))
2089 				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2090 			break;
2091 		case IP_VERSION(3, 0, 33):
2092 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2093 			break;
2094 		case IP_VERSION(4, 0, 0):
2095 		case IP_VERSION(4, 0, 2):
2096 		case IP_VERSION(4, 0, 4):
2097 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2098 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2099 			break;
2100 		case IP_VERSION(4, 0, 3):
2101 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2102 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2103 			break;
2104 		case IP_VERSION(4, 0, 5):
2105 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2106 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2107 			break;
2108 		default:
2109 			dev_err(adev->dev,
2110 				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2111 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2112 			return -EINVAL;
2113 		}
2114 	}
2115 	return 0;
2116 }
2117 
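/*
 * Add the MES (micro engine scheduler) IP block: opt-in via the amdgpu_mes
 * module parameter on gfx10, always enabled on gfx11.
 */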
2118 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2119 {
2120 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2121 	case IP_VERSION(10, 1, 10):
2122 	case IP_VERSION(10, 1, 1):
2123 	case IP_VERSION(10, 1, 2):
2124 	case IP_VERSION(10, 1, 3):
2125 	case IP_VERSION(10, 1, 4):
2126 	case IP_VERSION(10, 3, 0):
2127 	case IP_VERSION(10, 3, 1):
2128 	case IP_VERSION(10, 3, 2):
2129 	case IP_VERSION(10, 3, 3):
2130 	case IP_VERSION(10, 3, 4):
2131 	case IP_VERSION(10, 3, 5):
2132 	case IP_VERSION(10, 3, 6):
2133 		if (amdgpu_mes) {
2134 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
2135 			adev->enable_mes = true;
2136 			if (amdgpu_mes_kiq)
2137 				adev->enable_mes_kiq = true;
2138 		}
2139 		break;
2140 	case IP_VERSION(11, 0, 0):
2141 	case IP_VERSION(11, 0, 1):
2142 	case IP_VERSION(11, 0, 2):
2143 	case IP_VERSION(11, 0, 3):
2144 	case IP_VERSION(11, 0, 4):
2145 	case IP_VERSION(11, 5, 0):
2146 		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2147 		adev->enable_mes = true;
2148 		adev->enable_mes_kiq = true;
2149 		break;
2150 	default:
2151 		break;
2152 	}
2153 	return 0;
2154 }
2155 
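/* Apply ASIC-specific SOC configuration (currently only GC 9.4.3, aqua vanjaram). */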
2156 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2157 {
2158 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2159 	case IP_VERSION(9, 4, 3):
2160 		aqua_vanjaram_init_soc_config(adev);
2161 		break;
2162 	default:
2163 		break;
2164 	}
2165 }
2166 
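/* Add the VPE (video processing engine) IP block where present. */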
2167 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2168 {
2169 	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2170 	case IP_VERSION(6, 1, 0):
2171 		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2172 		break;
2173 	default:
2174 		break;
2175 	}
2176 
2177 	return 0;
2178 }
2179 
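/*
 * Add the UMSCH (user mode scheduler for multimedia) IP block when enabled
 * via bit 0 of the amdgpu_umsch_mm module parameter.
 */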
2180 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2181 {
2182 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2183 	case IP_VERSION(4, 0, 5):
2184 		if (amdgpu_umsch_mm & 0x1) {
2185 			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2186 			adev->enable_umsch_mm = true;
2187 		}
2188 		break;
2189 	default:
2190 		break;
2191 	}
2192 
2193 	return 0;
2194 }
2195 
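/*
 * Top-level entry point: resolve per-IP versions (hardcoded tables for
 * pre-discovery ASICs, the IP discovery binary otherwise), derive the GPU
 * family, APU flags and per-block callbacks, then register every IP block.
 */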
2196 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2197 {
2198 	int r;
2199 
2200 	switch (adev->asic_type) {
2201 	case CHIP_VEGA10:
2202 		vega10_reg_base_init(adev);
2203 		adev->sdma.num_instances = 2;
2204 		adev->gmc.num_umc = 4;
2205 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2206 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2207 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2208 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2209 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2210 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2211 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2212 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2213 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2214 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2215 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2216 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2217 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2218 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2219 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2220 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2221 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2222 		break;
2223 	case CHIP_VEGA12:
2224 		vega10_reg_base_init(adev);
2225 		adev->sdma.num_instances = 2;
2226 		adev->gmc.num_umc = 4;
2227 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2228 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2229 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2230 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2231 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2232 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2233 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2234 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2235 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2236 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2237 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2238 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2239 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2240 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2241 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2242 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2243 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2244 		break;
2245 	case CHIP_RAVEN:
2246 		vega10_reg_base_init(adev);
2247 		adev->sdma.num_instances = 1;
2248 		adev->vcn.num_vcn_inst = 1;
2249 		adev->gmc.num_umc = 2;
2250 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2251 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2252 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2253 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2254 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2255 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2256 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2257 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2258 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2259 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2260 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2261 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2262 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2263 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2264 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2265 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2266 		} else {
2267 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2268 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2269 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2270 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2271 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2272 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2273 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2274 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2275 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2276 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2277 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2278 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2279 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2280 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2281 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2282 		}
2283 		break;
2284 	case CHIP_VEGA20:
2285 		vega20_reg_base_init(adev);
2286 		adev->sdma.num_instances = 2;
2287 		adev->gmc.num_umc = 8;
2288 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2289 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2290 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2291 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2292 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2293 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2294 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2295 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2296 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2297 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2298 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2299 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2300 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2301 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2302 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2303 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2304 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2305 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2306 		break;
2307 	case CHIP_ARCTURUS:
2308 		arct_reg_base_init(adev);
2309 		adev->sdma.num_instances = 8;
2310 		adev->vcn.num_vcn_inst = 2;
2311 		adev->gmc.num_umc = 8;
2312 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2313 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2314 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2315 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2316 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2317 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2318 		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2319 		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2320 		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2321 		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2322 		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2323 		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2324 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2325 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2326 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2327 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2328 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2329 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2330 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2331 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2332 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2333 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2334 		break;
2335 	case CHIP_ALDEBARAN:
2336 		aldebaran_reg_base_init(adev);
2337 		adev->sdma.num_instances = 5;
2338 		adev->vcn.num_vcn_inst = 2;
2339 		adev->gmc.num_umc = 4;
2340 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2341 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2342 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2343 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2344 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2345 		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2346 		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2347 		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2348 		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2349 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2350 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2351 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2352 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2353 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2354 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2355 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2356 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2357 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2358 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2359 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2360 		break;
2361 	default:
2362 		r = amdgpu_discovery_reg_base_init(adev);
2363 		if (r)
2364 			return -EINVAL;
2365 
2366 		amdgpu_discovery_harvest_ip(adev);
2367 		amdgpu_discovery_get_gfx_info(adev);
2368 		amdgpu_discovery_get_mall_info(adev);
2369 		amdgpu_discovery_get_vcn_info(adev);
2370 		break;
2371 	}
2372 
2373 	amdgpu_discovery_init_soc_config(adev);
2374 	amdgpu_discovery_sysfs_init(adev);
2375 
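	/* map the GC IP version to a driver family id */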
2376 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2377 	case IP_VERSION(9, 0, 1):
2378 	case IP_VERSION(9, 2, 1):
2379 	case IP_VERSION(9, 4, 0):
2380 	case IP_VERSION(9, 4, 1):
2381 	case IP_VERSION(9, 4, 2):
2382 	case IP_VERSION(9, 4, 3):
2383 		adev->family = AMDGPU_FAMILY_AI;
2384 		break;
2385 	case IP_VERSION(9, 1, 0):
2386 	case IP_VERSION(9, 2, 2):
2387 	case IP_VERSION(9, 3, 0):
2388 		adev->family = AMDGPU_FAMILY_RV;
2389 		break;
2390 	case IP_VERSION(10, 1, 10):
2391 	case IP_VERSION(10, 1, 1):
2392 	case IP_VERSION(10, 1, 2):
2393 	case IP_VERSION(10, 1, 3):
2394 	case IP_VERSION(10, 1, 4):
2395 	case IP_VERSION(10, 3, 0):
2396 	case IP_VERSION(10, 3, 2):
2397 	case IP_VERSION(10, 3, 4):
2398 	case IP_VERSION(10, 3, 5):
2399 		adev->family = AMDGPU_FAMILY_NV;
2400 		break;
2401 	case IP_VERSION(10, 3, 1):
2402 		adev->family = AMDGPU_FAMILY_VGH;
2403 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2404 		break;
2405 	case IP_VERSION(10, 3, 3):
2406 		adev->family = AMDGPU_FAMILY_YC;
2407 		break;
2408 	case IP_VERSION(10, 3, 6):
2409 		adev->family = AMDGPU_FAMILY_GC_10_3_6;
2410 		break;
2411 	case IP_VERSION(10, 3, 7):
2412 		adev->family = AMDGPU_FAMILY_GC_10_3_7;
2413 		break;
2414 	case IP_VERSION(11, 0, 0):
2415 	case IP_VERSION(11, 0, 2):
2416 	case IP_VERSION(11, 0, 3):
2417 		adev->family = AMDGPU_FAMILY_GC_11_0_0;
2418 		break;
2419 	case IP_VERSION(11, 0, 1):
2420 	case IP_VERSION(11, 0, 4):
2421 		adev->family = AMDGPU_FAMILY_GC_11_0_1;
2422 		break;
2423 	case IP_VERSION(11, 5, 0):
2424 		adev->family = AMDGPU_FAMILY_GC_11_5_0;
2425 		break;
2426 	default:
2427 		return -EINVAL;
2428 	}
2429 
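	/* flag APU parts based on the GC IP version */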
2430 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2431 	case IP_VERSION(9, 1, 0):
2432 	case IP_VERSION(9, 2, 2):
2433 	case IP_VERSION(9, 3, 0):
2434 	case IP_VERSION(10, 1, 3):
2435 	case IP_VERSION(10, 1, 4):
2436 	case IP_VERSION(10, 3, 1):
2437 	case IP_VERSION(10, 3, 3):
2438 	case IP_VERSION(10, 3, 6):
2439 	case IP_VERSION(10, 3, 7):
2440 	case IP_VERSION(11, 0, 1):
2441 	case IP_VERSION(11, 0, 4):
2442 	case IP_VERSION(11, 5, 0):
2443 		adev->flags |= AMD_IS_APU;
2444 		break;
2445 	default:
2446 		break;
2447 	}
2448 
2449 	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
2450 		adev->gmc.xgmi.supported = true;
2451 
2452 	/* set NBIO callbacks based on the NBIO IP version */
2453 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2454 	case IP_VERSION(6, 1, 0):
2455 	case IP_VERSION(6, 2, 0):
2456 		adev->nbio.funcs = &nbio_v6_1_funcs;
2457 		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2458 		break;
2459 	case IP_VERSION(7, 0, 0):
2460 	case IP_VERSION(7, 0, 1):
2461 	case IP_VERSION(2, 5, 0):
2462 		adev->nbio.funcs = &nbio_v7_0_funcs;
2463 		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2464 		break;
2465 	case IP_VERSION(7, 4, 0):
2466 	case IP_VERSION(7, 4, 1):
2467 	case IP_VERSION(7, 4, 4):
2468 		adev->nbio.funcs = &nbio_v7_4_funcs;
2469 		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2470 		break;
2471 	case IP_VERSION(7, 9, 0):
2472 		adev->nbio.funcs = &nbio_v7_9_funcs;
2473 		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2474 		break;
2475 	case IP_VERSION(7, 11, 0):
2476 		adev->nbio.funcs = &nbio_v7_11_funcs;
2477 		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2478 		break;
2479 	case IP_VERSION(7, 2, 0):
2480 	case IP_VERSION(7, 2, 1):
2481 	case IP_VERSION(7, 3, 0):
2482 	case IP_VERSION(7, 5, 0):
2483 	case IP_VERSION(7, 5, 1):
2484 		adev->nbio.funcs = &nbio_v7_2_funcs;
2485 		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2486 		break;
2487 	case IP_VERSION(2, 1, 1):
2488 	case IP_VERSION(2, 3, 0):
2489 	case IP_VERSION(2, 3, 1):
2490 	case IP_VERSION(2, 3, 2):
2491 	case IP_VERSION(3, 3, 0):
2492 	case IP_VERSION(3, 3, 1):
2493 	case IP_VERSION(3, 3, 2):
2494 	case IP_VERSION(3, 3, 3):
2495 		adev->nbio.funcs = &nbio_v2_3_funcs;
2496 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2497 		break;
2498 	case IP_VERSION(4, 3, 0):
2499 	case IP_VERSION(4, 3, 1):
2500 		if (amdgpu_sriov_vf(adev))
2501 			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2502 		else
2503 			adev->nbio.funcs = &nbio_v4_3_funcs;
2504 		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2505 		break;
2506 	case IP_VERSION(7, 7, 0):
2507 	case IP_VERSION(7, 7, 1):
2508 		adev->nbio.funcs = &nbio_v7_7_funcs;
2509 		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2510 		break;
2511 	default:
2512 		break;
2513 	}
2514 
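	/* set HDP callbacks */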
2515 	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2516 	case IP_VERSION(4, 0, 0):
2517 	case IP_VERSION(4, 0, 1):
2518 	case IP_VERSION(4, 1, 0):
2519 	case IP_VERSION(4, 1, 1):
2520 	case IP_VERSION(4, 1, 2):
2521 	case IP_VERSION(4, 2, 0):
2522 	case IP_VERSION(4, 2, 1):
2523 	case IP_VERSION(4, 4, 0):
2524 	case IP_VERSION(4, 4, 2):
2525 		adev->hdp.funcs = &hdp_v4_0_funcs;
2526 		break;
2527 	case IP_VERSION(5, 0, 0):
2528 	case IP_VERSION(5, 0, 1):
2529 	case IP_VERSION(5, 0, 2):
2530 	case IP_VERSION(5, 0, 3):
2531 	case IP_VERSION(5, 0, 4):
2532 	case IP_VERSION(5, 2, 0):
2533 		adev->hdp.funcs = &hdp_v5_0_funcs;
2534 		break;
2535 	case IP_VERSION(5, 2, 1):
2536 		adev->hdp.funcs = &hdp_v5_2_funcs;
2537 		break;
2538 	case IP_VERSION(6, 0, 0):
2539 	case IP_VERSION(6, 0, 1):
2540 	case IP_VERSION(6, 1, 0):
2541 		adev->hdp.funcs = &hdp_v6_0_funcs;
2542 		break;
2543 	default:
2544 		break;
2545 	}
2546 
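	/* set DF (data fabric) callbacks */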
2547 	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
2548 	case IP_VERSION(3, 6, 0):
2549 	case IP_VERSION(3, 6, 1):
2550 	case IP_VERSION(3, 6, 2):
2551 		adev->df.funcs = &df_v3_6_funcs;
2552 		break;
2553 	case IP_VERSION(2, 1, 0):
2554 	case IP_VERSION(2, 1, 1):
2555 	case IP_VERSION(2, 5, 0):
2556 	case IP_VERSION(3, 5, 1):
2557 	case IP_VERSION(3, 5, 2):
2558 		adev->df.funcs = &df_v1_7_funcs;
2559 		break;
2560 	case IP_VERSION(4, 3, 0):
2561 		adev->df.funcs = &df_v4_3_funcs;
2562 		break;
2563 	case IP_VERSION(4, 6, 2):
2564 		adev->df.funcs = &df_v4_6_2_funcs;
2565 		break;
2566 	default:
2567 		break;
2568 	}
2569 
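	/* set SMUIO callbacks */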
2570 	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
2571 	case IP_VERSION(9, 0, 0):
2572 	case IP_VERSION(9, 0, 1):
2573 	case IP_VERSION(10, 0, 0):
2574 	case IP_VERSION(10, 0, 1):
2575 	case IP_VERSION(10, 0, 2):
2576 		adev->smuio.funcs = &smuio_v9_0_funcs;
2577 		break;
2578 	case IP_VERSION(11, 0, 0):
2579 	case IP_VERSION(11, 0, 2):
2580 	case IP_VERSION(11, 0, 3):
2581 	case IP_VERSION(11, 0, 4):
2582 	case IP_VERSION(11, 0, 7):
2583 	case IP_VERSION(11, 0, 8):
2584 		adev->smuio.funcs = &smuio_v11_0_funcs;
2585 		break;
2586 	case IP_VERSION(11, 0, 6):
2587 	case IP_VERSION(11, 0, 10):
2588 	case IP_VERSION(11, 0, 11):
2589 	case IP_VERSION(11, 5, 0):
2590 	case IP_VERSION(13, 0, 1):
2591 	case IP_VERSION(13, 0, 9):
2592 	case IP_VERSION(13, 0, 10):
2593 		adev->smuio.funcs = &smuio_v11_0_6_funcs;
2594 		break;
2595 	case IP_VERSION(13, 0, 2):
2596 		adev->smuio.funcs = &smuio_v13_0_funcs;
2597 		break;
2598 	case IP_VERSION(13, 0, 3):
2599 		adev->smuio.funcs = &smuio_v13_0_3_funcs;
2600 		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
2601 			adev->flags |= AMD_IS_APU;
2602 		}
2603 		break;
2604 	case IP_VERSION(13, 0, 6):
2605 	case IP_VERSION(13, 0, 8):
2606 	case IP_VERSION(14, 0, 0):
2607 		adev->smuio.funcs = &smuio_v13_0_6_funcs;
2608 		break;
2609 	default:
2610 		break;
2611 	}
2612 
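	/* set LSDMA callbacks */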
2613 	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
2614 	case IP_VERSION(6, 0, 0):
2615 	case IP_VERSION(6, 0, 1):
2616 	case IP_VERSION(6, 0, 2):
2617 	case IP_VERSION(6, 0, 3):
2618 		adev->lsdma.funcs = &lsdma_v6_0_funcs;
2619 		break;
2620 	default:
2621 		break;
2622 	}
2623 
2624 	r = amdgpu_discovery_set_common_ip_blocks(adev);
2625 	if (r)
2626 		return r;
2627 
2628 	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
2629 	if (r)
2630 		return r;
2631 
2632 	/* For SR-IOV, PSP needs to be initialized before IH */
2633 	if (amdgpu_sriov_vf(adev)) {
2634 		r = amdgpu_discovery_set_psp_ip_blocks(adev);
2635 		if (r)
2636 			return r;
2637 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2638 		if (r)
2639 			return r;
2640 	} else {
2641 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2642 		if (r)
2643 			return r;
2644 
2645 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2646 			r = amdgpu_discovery_set_psp_ip_blocks(adev);
2647 			if (r)
2648 				return r;
2649 		}
2650 	}
2651 
2652 	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2653 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2654 		if (r)
2655 			return r;
2656 	}
2657 
2658 	r = amdgpu_discovery_set_display_ip_blocks(adev);
2659 	if (r)
2660 		return r;
2661 
2662 	r = amdgpu_discovery_set_gc_ip_blocks(adev);
2663 	if (r)
2664 		return r;
2665 
2666 	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
2667 	if (r)
2668 		return r;
2669 
2670 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
2671 	     !amdgpu_sriov_vf(adev)) ||
2672 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
2673 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2674 		if (r)
2675 			return r;
2676 	}
2677 
2678 	r = amdgpu_discovery_set_mm_ip_blocks(adev);
2679 	if (r)
2680 		return r;
2681 
2682 	r = amdgpu_discovery_set_mes_ip_blocks(adev);
2683 	if (r)
2684 		return r;
2685 
2686 	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
2687 	if (r)
2688 		return r;
2689 
2690 	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
2691 	if (r)
2692 		return r;
2693 
2694 	return 0;
2695 }
2696 
2697