xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c (revision 889d55154516ec8f98ea953e8660963f2e29c75d)
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30 
31 #include "soc15.h"
32 #include "gfx_v9_0.h"
33 #include "gfx_v9_4_3.h"
34 #include "gmc_v9_0.h"
35 #include "df_v1_7.h"
36 #include "df_v3_6.h"
37 #include "df_v4_3.h"
38 #include "nbio_v6_1.h"
39 #include "nbio_v7_0.h"
40 #include "nbio_v7_4.h"
41 #include "nbio_v7_9.h"
42 #include "nbio_v7_11.h"
43 #include "hdp_v4_0.h"
44 #include "vega10_ih.h"
45 #include "vega20_ih.h"
46 #include "sdma_v4_0.h"
47 #include "sdma_v4_4_2.h"
48 #include "uvd_v7_0.h"
49 #include "vce_v4_0.h"
50 #include "vcn_v1_0.h"
51 #include "vcn_v2_5.h"
52 #include "jpeg_v2_5.h"
53 #include "smuio_v9_0.h"
54 #include "gmc_v10_0.h"
55 #include "gmc_v11_0.h"
56 #include "gfxhub_v2_0.h"
57 #include "mmhub_v2_0.h"
58 #include "nbio_v2_3.h"
59 #include "nbio_v4_3.h"
60 #include "nbio_v7_2.h"
61 #include "nbio_v7_7.h"
62 #include "hdp_v5_0.h"
63 #include "hdp_v5_2.h"
64 #include "hdp_v6_0.h"
65 #include "nv.h"
66 #include "soc21.h"
67 #include "navi10_ih.h"
68 #include "ih_v6_0.h"
69 #include "ih_v6_1.h"
70 #include "gfx_v10_0.h"
71 #include "gfx_v11_0.h"
72 #include "sdma_v5_0.h"
73 #include "sdma_v5_2.h"
74 #include "sdma_v6_0.h"
75 #include "lsdma_v6_0.h"
76 #include "vcn_v2_0.h"
77 #include "jpeg_v2_0.h"
78 #include "vcn_v3_0.h"
79 #include "jpeg_v3_0.h"
80 #include "vcn_v4_0.h"
81 #include "jpeg_v4_0.h"
82 #include "vcn_v4_0_3.h"
83 #include "jpeg_v4_0_3.h"
84 #include "vcn_v4_0_5.h"
85 #include "jpeg_v4_0_5.h"
86 #include "amdgpu_vkms.h"
87 #include "mes_v10_1.h"
88 #include "mes_v11_0.h"
89 #include "smuio_v11_0.h"
90 #include "smuio_v11_0_6.h"
91 #include "smuio_v13_0.h"
92 #include "smuio_v13_0_3.h"
93 #include "smuio_v13_0_6.h"
94 
95 #include "amdgpu_vpe.h"
96 
97 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
98 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
99 
100 #define mmRCC_CONFIG_MEMSIZE	0xde3
101 #define mmMM_INDEX		0x0
102 #define mmMM_INDEX_HI		0x6
103 #define mmMM_DATA		0x1
104 
105 static const char *hw_id_names[HW_ID_MAX] = {
106 	[MP1_HWID]		= "MP1",
107 	[MP2_HWID]		= "MP2",
108 	[THM_HWID]		= "THM",
109 	[SMUIO_HWID]		= "SMUIO",
110 	[FUSE_HWID]		= "FUSE",
111 	[CLKA_HWID]		= "CLKA",
112 	[PWR_HWID]		= "PWR",
113 	[GC_HWID]		= "GC",
114 	[UVD_HWID]		= "UVD",
115 	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
116 	[ACP_HWID]		= "ACP",
117 	[DCI_HWID]		= "DCI",
118 	[DMU_HWID]		= "DMU",
119 	[DCO_HWID]		= "DCO",
120 	[DIO_HWID]		= "DIO",
121 	[XDMA_HWID]		= "XDMA",
122 	[DCEAZ_HWID]		= "DCEAZ",
123 	[DAZ_HWID]		= "DAZ",
124 	[SDPMUX_HWID]		= "SDPMUX",
125 	[NTB_HWID]		= "NTB",
126 	[IOHC_HWID]		= "IOHC",
127 	[L2IMU_HWID]		= "L2IMU",
128 	[VCE_HWID]		= "VCE",
129 	[MMHUB_HWID]		= "MMHUB",
130 	[ATHUB_HWID]		= "ATHUB",
131 	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
132 	[DFX_HWID]		= "DFX",
133 	[DBGU0_HWID]		= "DBGU0",
134 	[DBGU1_HWID]		= "DBGU1",
135 	[OSSSYS_HWID]		= "OSSSYS",
136 	[HDP_HWID]		= "HDP",
137 	[SDMA0_HWID]		= "SDMA0",
138 	[SDMA1_HWID]		= "SDMA1",
139 	[SDMA2_HWID]		= "SDMA2",
140 	[SDMA3_HWID]		= "SDMA3",
141 	[LSDMA_HWID]		= "LSDMA",
142 	[ISP_HWID]		= "ISP",
143 	[DBGU_IO_HWID]		= "DBGU_IO",
144 	[DF_HWID]		= "DF",
145 	[CLKB_HWID]		= "CLKB",
146 	[FCH_HWID]		= "FCH",
147 	[DFX_DAP_HWID]		= "DFX_DAP",
148 	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
149 	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
150 	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
151 	[L1IMU3_HWID]		= "L1IMU3",
152 	[L1IMU4_HWID]		= "L1IMU4",
153 	[L1IMU5_HWID]		= "L1IMU5",
154 	[L1IMU6_HWID]		= "L1IMU6",
155 	[L1IMU7_HWID]		= "L1IMU7",
156 	[L1IMU8_HWID]		= "L1IMU8",
157 	[L1IMU9_HWID]		= "L1IMU9",
158 	[L1IMU10_HWID]		= "L1IMU10",
159 	[L1IMU11_HWID]		= "L1IMU11",
160 	[L1IMU12_HWID]		= "L1IMU12",
161 	[L1IMU13_HWID]		= "L1IMU13",
162 	[L1IMU14_HWID]		= "L1IMU14",
163 	[L1IMU15_HWID]		= "L1IMU15",
164 	[WAFLC_HWID]		= "WAFLC",
165 	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
166 	[PCIE_HWID]		= "PCIE",
167 	[PCS_HWID]		= "PCS",
168 	[DDCL_HWID]		= "DDCL",
169 	[SST_HWID]		= "SST",
170 	[IOAGR_HWID]		= "IOAGR",
171 	[NBIF_HWID]		= "NBIF",
172 	[IOAPIC_HWID]		= "IOAPIC",
173 	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
174 	[NTBCCP_HWID]		= "NTBCCP",
175 	[UMC_HWID]		= "UMC",
176 	[SATA_HWID]		= "SATA",
177 	[USB_HWID]		= "USB",
178 	[CCXSEC_HWID]		= "CCXSEC",
179 	[XGMI_HWID]		= "XGMI",
180 	[XGBE_HWID]		= "XGBE",
181 	[MP0_HWID]		= "MP0",
182 	[VPE_HWID]		= "VPE",
183 };
184 
185 static int hw_id_map[MAX_HWIP] = {
186 	[GC_HWIP]	= GC_HWID,
187 	[HDP_HWIP]	= HDP_HWID,
188 	[SDMA0_HWIP]	= SDMA0_HWID,
189 	[SDMA1_HWIP]	= SDMA1_HWID,
190 	[SDMA2_HWIP]    = SDMA2_HWID,
191 	[SDMA3_HWIP]    = SDMA3_HWID,
192 	[LSDMA_HWIP]    = LSDMA_HWID,
193 	[MMHUB_HWIP]	= MMHUB_HWID,
194 	[ATHUB_HWIP]	= ATHUB_HWID,
195 	[NBIO_HWIP]	= NBIF_HWID,
196 	[MP0_HWIP]	= MP0_HWID,
197 	[MP1_HWIP]	= MP1_HWID,
198 	[UVD_HWIP]	= UVD_HWID,
199 	[VCE_HWIP]	= VCE_HWID,
200 	[DF_HWIP]	= DF_HWID,
201 	[DCE_HWIP]	= DMU_HWID,
202 	[OSSSYS_HWIP]	= OSSSYS_HWID,
203 	[SMUIO_HWIP]	= SMUIO_HWID,
204 	[PWR_HWIP]	= PWR_HWID,
205 	[NBIF_HWIP]	= NBIF_HWID,
206 	[THM_HWIP]	= THM_HWID,
207 	[CLK_HWIP]	= CLKA_HWID,
208 	[UMC_HWIP]	= UMC_HWID,
209 	[XGMI_HWIP]	= XGMI_HWID,
210 	[DCI_HWIP]	= DCI_HWID,
211 	[PCIE_HWIP]	= PCIE_HWID,
212 	[VPE_HWIP]	= VPE_HWID,
213 };
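/*
 * The *_HWIP values are the driver's logical IP indices (used to index
 * adev->reg_offset and adev->ip_versions), while the *_HWID values are the
 * hardware block IDs found in the discovery blob.  The mapping is not
 * one-to-one; e.g. both NBIO_HWIP and NBIF_HWIP map to NBIF_HWID above.
 * Illustrative lookup (not part of the driver):
 *
 *	uint16_t hwid = hw_id_map[GC_HWIP];	// == GC_HWID
 *	const char *name = hw_id_names[hwid];	// == "GC"
 */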
214 
215 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
216 {
217 	u64 tmr_offset, tmr_size, pos;
218 	void *discv_regn;
219 	int ret;
220 
221 	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
222 	if (ret)
223 		return ret;
224 
225 	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
226 
227 	/* This region is read-only and reserved from system use */
228 	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
229 	if (discv_regn) {
230 		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
231 		memunmap(discv_regn);
232 		return 0;
233 	}
234 
235 	return -ENOENT;
236 }
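/*
 * Illustrative TMR math for the helper above, with made-up numbers and
 * DISCOVERY_TMR_OFFSET assumed to be 64 KiB:
 *
 *	tmr_offset = 0xfd00000000, tmr_size = 0x5300000
 *	pos = 0xfd00000000 + 0x5300000 - 0x10000 = 0xfd052f0000
 *
 * i.e. the discovery binary occupies the last DISCOVERY_TMR_OFFSET bytes of
 * the PSP trusted memory region; memremap() provides a CPU mapping of that
 * reserved range to copy from.
 */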
237 
238 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
239 						 uint8_t *binary)
240 {
241 	uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
242 	int ret = 0;
243 
244 	if (vram_size) {
245 		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
246 		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
247 					  adev->mman.discovery_tmr_size, false);
248 	} else {
249 		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
250 	}
251 
252 	return ret;
253 }
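/*
 * RCC_CONFIG_MEMSIZE reports the VRAM size in MiB, hence the << 20 above.
 * Worked example (illustrative): a register value of 0x4000 (16384 MiB)
 * gives
 *
 *	vram_size = 16384ULL << 20;		// 0x400000000, 16 GiB
 *	pos = vram_size - DISCOVERY_TMR_OFFSET;	// blob at the top of VRAM
 *
 * A register value of 0 falls back to the system-memory (ACPI TMR) path
 * above.
 */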
254 
255 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
256 {
257 	const struct firmware *fw;
258 	const char *fw_name;
259 	int r;
260 
261 	switch (amdgpu_discovery) {
262 	case 2:
263 		fw_name = FIRMWARE_IP_DISCOVERY;
264 		break;
265 	default:
266 		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
267 		return -EINVAL;
268 	}
269 
270 	r = request_firmware(&fw, fw_name, adev->dev);
271 	if (r) {
272 		dev_err(adev->dev, "can't load firmware \"%s\"\n",
273 			fw_name);
274 		return r;
275 	}
276 
277 	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
278 	release_firmware(fw);
279 
280 	return 0;
281 }
282 
283 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
284 {
285 	uint16_t checksum = 0;
286 	int i;
287 
288 	for (i = 0; i < size; i++)
289 		checksum += data[i];
290 
291 	return checksum;
292 }
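/*
 * The discovery checksum is a plain byte-wise sum truncated to 16 bits.
 * Worked example (illustrative):
 *
 *	uint8_t data[3] = { 0x12, 0x34, 0xff };
 *	amdgpu_discovery_calculate_checksum(data, 3);	// == 0x0145
 */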
293 
294 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
295 						    uint16_t expected)
296 {
297 	return amdgpu_discovery_calculate_checksum(data, size) == expected;
298 }
299 
300 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
301 {
302 	struct binary_header *bhdr;
303 	bhdr = (struct binary_header *)binary;
304 
305 	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
306 }
307 
308 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
309 {
310 	/*
311 	 * So far, apply this quirk only to those Navy Flounder boards which
312 	 * have a bad VCN config in their harvest table.
313 	 */
314 	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
315 	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
316 		switch (adev->pdev->revision) {
317 		case 0xC1:
318 		case 0xC2:
319 		case 0xC3:
320 		case 0xC5:
321 		case 0xC7:
322 		case 0xCF:
323 		case 0xDF:
324 			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
325 			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
326 			break;
327 		default:
328 			break;
329 		}
330 	}
331 }
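/*
 * Mask arithmetic behind the quirk above, assuming the usual one-bit-per-
 * instance encoding (AMDGPU_VCN_HARVEST_VCN0 == 0x1, VCN1 == 0x2; an
 * assumption of this sketch, see amdgpu_vcn.h):
 *
 *	harvest_config |= 0x2;	// mark VCN1 as harvested
 *	inst_mask &= ~0x2;	// drop instance 1 from the active mask
 *
 * so an inst_mask of 0b11 (two instances) becomes 0b01 (only VCN0 usable).
 */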
332 
333 static int amdgpu_discovery_init(struct amdgpu_device *adev)
334 {
335 	struct table_info *info;
336 	struct binary_header *bhdr;
337 	uint16_t offset;
338 	uint16_t size;
339 	uint16_t checksum;
340 	int r;
341 
342 	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
343 	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
344 	if (!adev->mman.discovery_bin)
345 		return -ENOMEM;
346 
347 	/* Read from file if it is the preferred option */
348 	if (amdgpu_discovery == 2) {
349 		dev_info(adev->dev, "use ip discovery information from file\n");
350 		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
351 
352 		if (r) {
353 			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
354 			r = -EINVAL;
355 			goto out;
356 		}
357 
358 	} else {
359 		r = amdgpu_discovery_read_binary_from_mem(
360 			adev, adev->mman.discovery_bin);
361 		if (r)
362 			goto out;
363 	}
364 
365 	/* check the ip discovery binary signature */
366 	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
367 		dev_err(adev->dev,
368 			"get invalid ip discovery binary signature\n");
369 		r = -EINVAL;
370 		goto out;
371 	}
372 
373 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
374 
375 	offset = offsetof(struct binary_header, binary_checksum) +
376 		sizeof(bhdr->binary_checksum);
377 	size = le16_to_cpu(bhdr->binary_size) - offset;
378 	checksum = le16_to_cpu(bhdr->binary_checksum);
379 
380 	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
381 					      size, checksum)) {
382 		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
383 		r = -EINVAL;
384 		goto out;
385 	}
386 
387 	info = &bhdr->table_list[IP_DISCOVERY];
388 	offset = le16_to_cpu(info->offset);
389 	checksum = le16_to_cpu(info->checksum);
390 
391 	if (offset) {
392 		struct ip_discovery_header *ihdr =
393 			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
394 		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
395 			dev_err(adev->dev, "invalid ip discovery data table signature\n");
396 			r = -EINVAL;
397 			goto out;
398 		}
399 
400 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
401 						      le16_to_cpu(ihdr->size), checksum)) {
402 			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
403 			r = -EINVAL;
404 			goto out;
405 		}
406 	}
407 
408 	info = &bhdr->table_list[GC];
409 	offset = le16_to_cpu(info->offset);
410 	checksum = le16_to_cpu(info->checksum);
411 
412 	if (offset) {
413 		struct gpu_info_header *ghdr =
414 			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);
415 
416 		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
417 			dev_err(adev->dev, "invalid ip discovery gc table id\n");
418 			r = -EINVAL;
419 			goto out;
420 		}
421 
422 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
423 						      le32_to_cpu(ghdr->size), checksum)) {
424 			dev_err(adev->dev, "invalid gc data table checksum\n");
425 			r = -EINVAL;
426 			goto out;
427 		}
428 	}
429 
430 	info = &bhdr->table_list[HARVEST_INFO];
431 	offset = le16_to_cpu(info->offset);
432 	checksum = le16_to_cpu(info->checksum);
433 
434 	if (offset) {
435 		struct harvest_info_header *hhdr =
436 			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);
437 
438 		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
439 			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
440 			r = -EINVAL;
441 			goto out;
442 		}
443 
444 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
445 						      sizeof(struct harvest_table), checksum)) {
446 			dev_err(adev->dev, "invalid harvest data table checksum\n");
447 			r = -EINVAL;
448 			goto out;
449 		}
450 	}
451 
452 	info = &bhdr->table_list[VCN_INFO];
453 	offset = le16_to_cpu(info->offset);
454 	checksum = le16_to_cpu(info->checksum);
455 
456 	if (offset) {
457 		struct vcn_info_header *vhdr =
458 			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);
459 
460 		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
461 			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
462 			r = -EINVAL;
463 			goto out;
464 		}
465 
466 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
467 						      le32_to_cpu(vhdr->size_bytes), checksum)) {
468 			dev_err(adev->dev, "invalid vcn data table checksum\n");
469 			r = -EINVAL;
470 			goto out;
471 		}
472 	}
473 
474 	info = &bhdr->table_list[MALL_INFO];
475 	offset = le16_to_cpu(info->offset);
476 	checksum = le16_to_cpu(info->checksum);
477 
478 	if (0 && offset) {
479 		struct mall_info_header *mhdr =
480 			(struct mall_info_header *)(adev->mman.discovery_bin + offset);
481 
482 		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
483 			dev_err(adev->dev, "invalid ip discovery mall table id\n");
484 			r = -EINVAL;
485 			goto out;
486 		}
487 
488 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
489 						      le32_to_cpu(mhdr->size_bytes), checksum)) {
490 			dev_err(adev->dev, "invalid mall data table checksum\n");
491 			r = -EINVAL;
492 			goto out;
493 		}
494 	}
495 
496 	return 0;
497 
498 out:
499 	kfree(adev->mman.discovery_bin);
500 	adev->mman.discovery_bin = NULL;
501 
502 	return r;
503 }
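/*
 * Blob layout validated above (sketch): a binary_header whose table_list[]
 * entries each carry a 16-bit offset/checksum pair for the optional
 * per-table payloads (offset == 0 means the table is absent):
 *
 *	+------------------+ 0
 *	| binary_header    |  signature, size, checksum, table_list[]
 *	+------------------+ table_list[IP_DISCOVERY].offset
 *	| ip discovery     |  dies -> variable-length ip_v4 records
 *	+------------------+ table_list[GC/HARVEST_INFO/VCN_INFO/MALL_INFO].offset
 *	| info tables      |  gc, harvest, vcn and mall data
 *	+------------------+
 */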
504 
505 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
506 
507 void amdgpu_discovery_fini(struct amdgpu_device *adev)
508 {
509 	amdgpu_discovery_sysfs_fini(adev);
510 	kfree(adev->mman.discovery_bin);
511 	adev->mman.discovery_bin = NULL;
512 }
513 
514 static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
515 {
516 	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
517 		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
518 			  ip->instance_number);
519 		return -EINVAL;
520 	}
521 	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
522 		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
523 			  le16_to_cpu(ip->hw_id));
524 		return -EINVAL;
525 	}
526 
527 	return 0;
528 }
529 
530 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
531 						uint32_t *vcn_harvest_count)
532 {
533 	struct binary_header *bhdr;
534 	struct ip_discovery_header *ihdr;
535 	struct die_header *dhdr;
536 	struct ip_v4 *ip;
537 	uint16_t die_offset, ip_offset, num_dies, num_ips;
538 	int i, j;
539 
540 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
541 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
542 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
543 	num_dies = le16_to_cpu(ihdr->num_dies);
544 
545 	/* scan harvest bit of all IP data structures */
546 	for (i = 0; i < num_dies; i++) {
547 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
548 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
549 		num_ips = le16_to_cpu(dhdr->num_ips);
550 		ip_offset = die_offset + sizeof(*dhdr);
551 
552 		for (j = 0; j < num_ips; j++) {
553 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
554 
555 			if (amdgpu_discovery_validate_ip(ip))
556 				goto next_ip;
557 
558 			if (le16_to_cpu(ip->variant) == 1) {
559 				switch (le16_to_cpu(ip->hw_id)) {
560 				case VCN_HWID:
561 					(*vcn_harvest_count)++;
562 					if (ip->instance_number == 0) {
563 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
564 						adev->vcn.inst_mask &=
565 							~AMDGPU_VCN_HARVEST_VCN0;
566 						adev->jpeg.inst_mask &=
567 							~AMDGPU_VCN_HARVEST_VCN0;
568 					} else {
569 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
570 						adev->vcn.inst_mask &=
571 							~AMDGPU_VCN_HARVEST_VCN1;
572 						adev->jpeg.inst_mask &=
573 							~AMDGPU_VCN_HARVEST_VCN1;
574 					}
575 					break;
576 				case DMU_HWID:
577 					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
578 					break;
579 				default:
580 					break;
581 				}
582 			}
583 next_ip:
584 			if (ihdr->base_addr_64_bit)
585 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
586 			else
587 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
588 		}
589 	}
590 }
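/*
 * The ip_v4 records walked above are variable length, which is why the
 * cursor advances by struct_size() instead of sizeof().  Illustrative
 * arithmetic for a 32-bit-address record with 3 base addresses:
 *
 *	ip_offset += struct_size(ip, base_address, 3);
 *	// == sizeof(struct ip_v4) + 3 * sizeof(u32), i.e. the fixed header
 *	// plus the flexible base_address[] array
 */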
591 
592 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
593 						     uint32_t *vcn_harvest_count,
594 						     uint32_t *umc_harvest_count)
595 {
596 	struct binary_header *bhdr;
597 	struct harvest_table *harvest_info;
598 	u16 offset;
599 	int i;
600 	uint32_t umc_harvest_config = 0;
601 
602 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
603 	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
604 
605 	if (!offset) {
606 		dev_err(adev->dev, "invalid harvest table offset\n");
607 		return;
608 	}
609 
610 	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
611 
612 	for (i = 0; i < 32; i++) {
613 		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
614 			break;
615 
616 		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
617 		case VCN_HWID:
618 			(*vcn_harvest_count)++;
619 			adev->vcn.harvest_config |=
620 				(1 << harvest_info->list[i].number_instance);
621 			adev->jpeg.harvest_config |=
622 				(1 << harvest_info->list[i].number_instance);
623 
624 			adev->vcn.inst_mask &=
625 				~(1U << harvest_info->list[i].number_instance);
626 			adev->jpeg.inst_mask &=
627 				~(1U << harvest_info->list[i].number_instance);
628 			break;
629 		case DMU_HWID:
630 			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
631 			break;
632 		case UMC_HWID:
633 			umc_harvest_config |=
634 				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
635 			(*umc_harvest_count)++;
636 			break;
637 		case GC_HWID:
638 			adev->gfx.xcc_mask &=
639 				~(1U << harvest_info->list[i].number_instance);
640 			break;
641 		case SDMA0_HWID:
642 			adev->sdma.sdma_mask &=
643 				~(1U << harvest_info->list[i].number_instance);
644 			break;
645 		default:
646 			break;
647 		}
648 	}
649 
650 	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
651 				~umc_harvest_config;
652 }
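/*
 * active_mask arithmetic above, worked through with illustrative numbers:
 * with node_inst_num = 4 and only UMC instance 2 harvested,
 *
 *	umc_harvest_config = 1 << 2;			// 0b0100
 *	active_mask = ((1 << 4) - 1) & ~0b0100;		// 0b1011
 *
 * i.e. UMC nodes 0, 1 and 3 stay active.
 */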
653 
654 /* ================================================== */
655 
656 struct ip_hw_instance {
657 	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
658 
659 	int hw_id;
660 	u8  num_instance;
661 	u8  major, minor, revision;
662 	u8  harvest;
663 
664 	int num_base_addresses;
665 	u32 base_addr[];
666 };
667 
668 struct ip_hw_id {
669 	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
670 	int hw_id;
671 };
672 
673 struct ip_die_entry {
674 	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
675 	u16 num_ips;
676 };
677 
678 /* -------------------------------------------------- */
679 
680 struct ip_hw_instance_attr {
681 	struct attribute attr;
682 	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
683 };
684 
685 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
686 {
687 	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
688 }
689 
690 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
691 {
692 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
693 }
694 
695 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
696 {
697 	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
698 }
699 
700 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
701 {
702 	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
703 }
704 
705 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
706 {
707 	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
708 }
709 
710 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
711 {
712 	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
713 }
714 
715 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
716 {
717 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
718 }
719 
720 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
721 {
722 	ssize_t res, at;
723 	int ii;
724 
725 	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
726 			/* Keep at + size <= PAGE_SIZE: each "0x%08X\n" line
727 			 * needs at most 12 bytes, including the NUL. */
728 		if (at + 12 > PAGE_SIZE)
729 			break;
730 		res = sysfs_emit_at(buf, at, "0x%08X\n",
731 				    ip_hw_instance->base_addr[ii]);
732 		if (res <= 0)
733 			break;
734 		at += res;
735 	}
736 
737 	return res < 0 ? res : at;
738 }
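/*
 * Each emitted line is 11 characters ("0x" + 8 hex digits + '\n'); the
 * check above uses 12, presumably to leave room for the terminating NUL,
 * so the output is guaranteed to fit the single PAGE_SIZE sysfs buffer.
 */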
739 
740 static struct ip_hw_instance_attr ip_hw_attr[] = {
741 	__ATTR_RO(hw_id),
742 	__ATTR_RO(num_instance),
743 	__ATTR_RO(major),
744 	__ATTR_RO(minor),
745 	__ATTR_RO(revision),
746 	__ATTR_RO(harvest),
747 	__ATTR_RO(num_base_addresses),
748 	__ATTR_RO(base_addr),
749 };
750 
751 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
752 ATTRIBUTE_GROUPS(ip_hw_instance);
753 
754 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
755 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
756 
757 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
758 					struct attribute *attr,
759 					char *buf)
760 {
761 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
762 	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
763 
764 	if (!ip_hw_attr->show)
765 		return -EIO;
766 
767 	return ip_hw_attr->show(ip_hw_instance, buf);
768 }
769 
770 static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
771 	.show = ip_hw_instance_attr_show,
772 };
773 
774 static void ip_hw_instance_release(struct kobject *kobj)
775 {
776 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
777 
778 	kfree(ip_hw_instance);
779 }
780 
781 static const struct kobj_type ip_hw_instance_ktype = {
782 	.release = ip_hw_instance_release,
783 	.sysfs_ops = &ip_hw_instance_sysfs_ops,
784 	.default_groups = ip_hw_instance_groups,
785 };
786 
787 /* -------------------------------------------------- */
788 
789 #define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
790 
791 static void ip_hw_id_release(struct kobject *kobj)
792 {
793 	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
794 
795 	if (!list_empty(&ip_hw_id->hw_id_kset.list))
796 		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
797 	kfree(ip_hw_id);
798 }
799 
800 static const struct kobj_type ip_hw_id_ktype = {
801 	.release = ip_hw_id_release,
802 	.sysfs_ops = &kobj_sysfs_ops,
803 };
804 
805 /* -------------------------------------------------- */
806 
807 static void die_kobj_release(struct kobject *kobj);
808 static void ip_disc_release(struct kobject *kobj);
809 
810 struct ip_die_entry_attribute {
811 	struct attribute attr;
812 	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
813 };
814 
815 #define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)
816 
817 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
818 {
819 	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
820 }
821 
822 /* If there are more ip_die_entry attrs, other than the number of IPs,
823  * we can make this into an array of attrs, and then initialize
824  * ip_die_entry_attrs in a loop.
825  */
826 static struct ip_die_entry_attribute num_ips_attr =
827 	__ATTR_RO(num_ips);
828 
829 static struct attribute *ip_die_entry_attrs[] = {
830 	&num_ips_attr.attr,
831 	NULL,
832 };
833 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
834 
835 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
836 
837 static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
838 				      struct attribute *attr,
839 				      char *buf)
840 {
841 	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
842 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
843 
844 	if (!ip_die_entry_attr->show)
845 		return -EIO;
846 
847 	return ip_die_entry_attr->show(ip_die_entry, buf);
848 }
849 
850 static void ip_die_entry_release(struct kobject *kobj)
851 {
852 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
853 
854 	if (!list_empty(&ip_die_entry->ip_kset.list))
855 		DRM_ERROR("ip_die_entry->ip_kset is not empty");
856 	kfree(ip_die_entry);
857 }
858 
859 static const struct sysfs_ops ip_die_entry_sysfs_ops = {
860 	.show = ip_die_entry_attr_show,
861 };
862 
863 static const struct kobj_type ip_die_entry_ktype = {
864 	.release = ip_die_entry_release,
865 	.sysfs_ops = &ip_die_entry_sysfs_ops,
866 	.default_groups = ip_die_entry_groups,
867 };
868 
869 static const struct kobj_type die_kobj_ktype = {
870 	.release = die_kobj_release,
871 	.sysfs_ops = &kobj_sysfs_ops,
872 };
873 
874 static const struct kobj_type ip_discovery_ktype = {
875 	.release = ip_disc_release,
876 	.sysfs_ops = &kobj_sysfs_ops,
877 };
878 
879 struct ip_discovery_top {
880 	struct kobject kobj;    /* ip_discovery/ */
881 	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
882 	struct amdgpu_device *adev;
883 };
884 
885 static void die_kobj_release(struct kobject *kobj)
886 {
887 	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
888 						       struct ip_discovery_top,
889 						       die_kset);
890 	if (!list_empty(&ip_top->die_kset.list))
891 		DRM_ERROR("ip_top->die_kset is not empty");
892 }
893 
894 static void ip_disc_release(struct kobject *kobj)
895 {
896 	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
897 						       kobj);
898 	struct amdgpu_device *adev = ip_top->adev;
899 
900 	adev->ip_top = NULL;
901 	kfree(ip_top);
902 }
903 
904 static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
905 						 uint16_t hw_id, uint8_t inst)
906 {
907 	uint8_t harvest = 0;
908 
909 	/* Until a uniform way is figured out, derive the mask from the hw_id */
910 	switch (hw_id) {
911 	case VCN_HWID:
912 		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
913 		break;
914 	case DMU_HWID:
915 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
916 			harvest = 0x1;
917 		break;
918 	case UMC_HWID:
919 		/* TODO: UMC harvest info needs separate parsing; ignore it for now. */
920 		break;
921 	case GC_HWID:
922 		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
923 		break;
924 	case SDMA0_HWID:
925 		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
926 		break;
927 	default:
928 		break;
929 	}
930 
931 	return harvest;
932 }
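/*
 * Example evaluation (illustrative): for VCN instance 1 on a part where
 * only instance 0 survived (inst_mask == 0b01),
 *
 *	harvest = ((1 << 1) & 0b01) == 0;	// -> 1, instance harvested
 *
 * The result feeds the per-instance "harvest" sysfs attribute.
 */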
933 
934 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
935 				      struct ip_die_entry *ip_die_entry,
936 				      const size_t _ip_offset, const int num_ips,
937 				      bool reg_base_64)
938 {
939 	int ii, jj, kk, res;
940 
941 	DRM_DEBUG("num_ips:%d", num_ips);
942 
943 	/* Find all IPs of a given HW ID, and add their instance to
944 	 * #die/#hw_id/#instance/<attributes>
945 	 */
946 	for (ii = 0; ii < HW_ID_MAX; ii++) {
947 		struct ip_hw_id *ip_hw_id = NULL;
948 		size_t ip_offset = _ip_offset;
949 
950 		for (jj = 0; jj < num_ips; jj++) {
951 			struct ip_v4 *ip;
952 			struct ip_hw_instance *ip_hw_instance;
953 
954 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
955 			if (amdgpu_discovery_validate_ip(ip) ||
956 			    le16_to_cpu(ip->hw_id) != ii)
957 				goto next_ip;
958 
959 			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
960 
961 			/* We have a hw_id match; register the hw
962 			 * block if not yet registered.
963 			 */
964 			if (!ip_hw_id) {
965 				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
966 				if (!ip_hw_id)
967 					return -ENOMEM;
968 				ip_hw_id->hw_id = ii;
969 
970 				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
971 				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
972 				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
973 				res = kset_register(&ip_hw_id->hw_id_kset);
974 				if (res) {
975 					DRM_ERROR("Couldn't register ip_hw_id kset");
976 					kfree(ip_hw_id);
977 					return res;
978 				}
979 				if (hw_id_names[ii]) {
980 					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
981 								&ip_hw_id->hw_id_kset.kobj,
982 								hw_id_names[ii]);
983 					if (res) {
984 						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
985 							  hw_id_names[ii],
986 							  kobject_name(&ip_die_entry->ip_kset.kobj));
987 					}
988 				}
989 			}
990 
991 			/* Now register its instance.
992 			 */
993 			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
994 							     base_addr,
995 							     ip->num_base_address),
996 						 GFP_KERNEL);
997 			if (!ip_hw_instance) {
998 				DRM_ERROR("no memory for ip_hw_instance");
999 				return -ENOMEM;
1000 			}
1001 			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1002 			ip_hw_instance->num_instance = ip->instance_number;
1003 			ip_hw_instance->major = ip->major;
1004 			ip_hw_instance->minor = ip->minor;
1005 			ip_hw_instance->revision = ip->revision;
1006 			ip_hw_instance->harvest =
1007 				amdgpu_discovery_get_harvest_info(
1008 					adev, ip_hw_instance->hw_id,
1009 					ip_hw_instance->num_instance);
1010 			ip_hw_instance->num_base_addresses = ip->num_base_address;
1011 
1012 			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1013 				if (reg_base_64)
1014 					ip_hw_instance->base_addr[kk] =
1015 						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1016 				else
1017 					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1018 			}
1019 
1020 			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1021 			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1022 			res = kobject_add(&ip_hw_instance->kobj, NULL,
1023 					  "%d", ip_hw_instance->num_instance);
1024 next_ip:
1025 			if (reg_base_64)
1026 				ip_offset += struct_size(ip, base_address_64,
1027 							 ip->num_base_address);
1028 			else
1029 				ip_offset += struct_size(ip, base_address,
1030 							 ip->num_base_address);
1031 		}
1032 	}
1033 
1034 	return 0;
1035 }
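/*
 * Resulting sysfs layout (sketch), relative to the device directory:
 *
 *	ip_discovery/die/<die>/<hw_id>/<instance>/{hw_id,num_instance,major,
 *		minor,revision,harvest,num_base_addresses,base_addr}
 *
 * plus, where hw_id_names[] knows the block, a friendly symlink such as
 * "GC" pointing at the numeric <hw_id> directory of that die.
 */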
1036 
1037 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1038 {
1039 	struct binary_header *bhdr;
1040 	struct ip_discovery_header *ihdr;
1041 	struct die_header *dhdr;
1042 	struct kset *die_kset = &adev->ip_top->die_kset;
1043 	u16 num_dies, die_offset, num_ips;
1044 	size_t ip_offset;
1045 	int ii, res;
1046 
1047 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1048 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1049 					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1050 	num_dies = le16_to_cpu(ihdr->num_dies);
1051 
1052 	DRM_DEBUG("number of dies: %d\n", num_dies);
1053 
1054 	for (ii = 0; ii < num_dies; ii++) {
1055 		struct ip_die_entry *ip_die_entry;
1056 
1057 		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1058 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1059 		num_ips = le16_to_cpu(dhdr->num_ips);
1060 		ip_offset = die_offset + sizeof(*dhdr);
1061 
1062 		/* Add the die to the kset.
1063 		 *
1064 		 * dhdr->die_id == ii, which was checked in
1065 		 * amdgpu_discovery_reg_base_init().
1066 		 */
1067 
1068 		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1069 		if (!ip_die_entry)
1070 			return -ENOMEM;
1071 
1072 		ip_die_entry->num_ips = num_ips;
1073 
1074 		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1075 		ip_die_entry->ip_kset.kobj.kset = die_kset;
1076 		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1077 		res = kset_register(&ip_die_entry->ip_kset);
1078 		if (res) {
1079 			DRM_ERROR("Couldn't register ip_die_entry kset");
1080 			kfree(ip_die_entry);
1081 			return res;
1082 		}
1083 
1084 		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1085 	}
1086 
1087 	return 0;
1088 }
1089 
1090 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1091 {
1092 	struct kset *die_kset;
1093 	int res, ii;
1094 
1095 	if (!adev->mman.discovery_bin)
1096 		return -EINVAL;
1097 
1098 	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1099 	if (!adev->ip_top)
1100 		return -ENOMEM;
1101 
1102 	adev->ip_top->adev = adev;
1103 
1104 	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1105 				   &adev->dev->kobj, "ip_discovery");
1106 	if (res) {
1107 		DRM_ERROR("Couldn't init and add ip_discovery/");
1108 		goto Err;
1109 	}
1110 
1111 	die_kset = &adev->ip_top->die_kset;
1112 	kobject_set_name(&die_kset->kobj, "%s", "die");
1113 	die_kset->kobj.parent = &adev->ip_top->kobj;
1114 	die_kset->kobj.ktype = &die_kobj_ktype;
1115 	res = kset_register(&adev->ip_top->die_kset);
1116 	if (res) {
1117 		DRM_ERROR("Couldn't register die_kset");
1118 		goto Err;
1119 	}
1120 
1121 	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1122 		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1123 	ip_hw_instance_attrs[ii] = NULL;
1124 
1125 	res = amdgpu_discovery_sysfs_recurse(adev);
1126 
1127 	return res;
1128 Err:
1129 	kobject_put(&adev->ip_top->kobj);
1130 	return res;
1131 }
1132 
1133 /* -------------------------------------------------- */
1134 
1135 #define list_to_kobj(el) container_of(el, struct kobject, entry)
1136 
1137 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1138 {
1139 	struct list_head *el, *tmp;
1140 	struct kset *hw_id_kset;
1141 
1142 	hw_id_kset = &ip_hw_id->hw_id_kset;
1143 	spin_lock(&hw_id_kset->list_lock);
1144 	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1145 		list_del_init(el);
1146 		spin_unlock(&hw_id_kset->list_lock);
1147 		/* kobject is embedded in ip_hw_instance */
1148 		kobject_put(list_to_kobj(el));
1149 		spin_lock(&hw_id_kset->list_lock);
1150 	}
1151 	spin_unlock(&hw_id_kset->list_lock);
1152 	kobject_put(&ip_hw_id->hw_id_kset.kobj);
1153 }
1154 
1155 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1156 {
1157 	struct list_head *el, *tmp;
1158 	struct kset *ip_kset;
1159 
1160 	ip_kset = &ip_die_entry->ip_kset;
1161 	spin_lock(&ip_kset->list_lock);
1162 	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1163 		list_del_init(el);
1164 		spin_unlock(&ip_kset->list_lock);
1165 		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1166 		spin_lock(&ip_kset->list_lock);
1167 	}
1168 	spin_unlock(&ip_kset->list_lock);
1169 	kobject_put(&ip_die_entry->ip_kset.kobj);
1170 }
1171 
1172 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1173 {
1174 	struct list_head *el, *tmp;
1175 	struct kset *die_kset;
1176 
1177 	die_kset = &adev->ip_top->die_kset;
1178 	spin_lock(&die_kset->list_lock);
1179 	list_for_each_prev_safe(el, tmp, &die_kset->list) {
1180 		list_del_init(el);
1181 		spin_unlock(&die_kset->list_lock);
1182 		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1183 		spin_lock(&die_kset->list_lock);
1184 	}
1185 	spin_unlock(&die_kset->list_lock);
1186 	kobject_put(&adev->ip_top->die_kset.kobj);
1187 	kobject_put(&adev->ip_top->kobj);
1188 }
1189 
1190 /* ================================================== */
1191 
1192 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1193 {
1194 	uint8_t num_base_address, subrev, variant;
1195 	struct binary_header *bhdr;
1196 	struct ip_discovery_header *ihdr;
1197 	struct die_header *dhdr;
1198 	struct ip_v4 *ip;
1199 	uint16_t die_offset;
1200 	uint16_t ip_offset;
1201 	uint16_t num_dies;
1202 	uint16_t num_ips;
1203 	int hw_ip;
1204 	int i, j, k;
1205 	int r;
1206 
1207 	r = amdgpu_discovery_init(adev);
1208 	if (r) {
1209 		DRM_ERROR("amdgpu_discovery_init failed\n");
1210 		return r;
1211 	}
1212 
1213 	adev->gfx.xcc_mask = 0;
1214 	adev->sdma.sdma_mask = 0;
1215 	adev->vcn.inst_mask = 0;
1216 	adev->jpeg.inst_mask = 0;
1217 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1218 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1219 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1220 	num_dies = le16_to_cpu(ihdr->num_dies);
1221 
1222 	DRM_DEBUG("number of dies: %d\n", num_dies);
1223 
1224 	for (i = 0; i < num_dies; i++) {
1225 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1226 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1227 		num_ips = le16_to_cpu(dhdr->num_ips);
1228 		ip_offset = die_offset + sizeof(*dhdr);
1229 
1230 		if (le16_to_cpu(dhdr->die_id) != i) {
1231 			DRM_ERROR("invalid die id %d, expected %d\n",
1232 					le16_to_cpu(dhdr->die_id), i);
1233 			return -EINVAL;
1234 		}
1235 
1236 		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1237 				le16_to_cpu(dhdr->die_id), num_ips);
1238 
1239 		for (j = 0; j < num_ips; j++) {
1240 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1241 
1242 			if (amdgpu_discovery_validate_ip(ip))
1243 				goto next_ip;
1244 
1245 			num_base_address = ip->num_base_address;
1246 
1247 			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1248 				  hw_id_names[le16_to_cpu(ip->hw_id)],
1249 				  le16_to_cpu(ip->hw_id),
1250 				  ip->instance_number,
1251 				  ip->major, ip->minor,
1252 				  ip->revision);
1253 
1254 			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1255 				/* Bit [5:0]: original revision value
1256 				 * Bit [7:6]: en/decode capability:
1257 				 *     0b00 : VCN functions normally
1258 				 *     0b10 : encode is disabled
1259 				 *     0b01 : decode is disabled
1260 				 */
1261 				if (adev->vcn.num_vcn_inst <
1262 				    AMDGPU_MAX_VCN_INSTANCES) {
1263 					adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1264 						ip->revision & 0xc0;
1265 					adev->vcn.num_vcn_inst++;
1266 					adev->vcn.inst_mask |=
1267 						(1U << ip->instance_number);
1268 					adev->jpeg.inst_mask |=
1269 						(1U << ip->instance_number);
1270 				} else {
1271 					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1272 						adev->vcn.num_vcn_inst + 1,
1273 						AMDGPU_MAX_VCN_INSTANCES);
1274 				}
1275 				ip->revision &= ~0xc0;
1276 			}
1277 			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1278 			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1279 			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1280 			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1281 				if (adev->sdma.num_instances <
1282 				    AMDGPU_MAX_SDMA_INSTANCES) {
1283 					adev->sdma.num_instances++;
1284 					adev->sdma.sdma_mask |=
1285 						(1U << ip->instance_number);
1286 				} else {
1287 					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1288 						adev->sdma.num_instances + 1,
1289 						AMDGPU_MAX_SDMA_INSTANCES);
1290 				}
1291 			}
1292 
1293 			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1294 				adev->gmc.num_umc++;
1295 				adev->umc.node_inst_num++;
1296 			}
1297 
1298 			if (le16_to_cpu(ip->hw_id) == GC_HWID)
1299 				adev->gfx.xcc_mask |=
1300 					(1U << ip->instance_number);
1301 
1302 			for (k = 0; k < num_base_address; k++) {
1303 				/*
1304 				 * convert the endianness of base addresses in place,
1305 				 * so that we don't need to convert them when accessing adev->reg_offset.
1306 				 */
1307 				if (ihdr->base_addr_64_bit)
1308 					/* Truncate the 64-bit base address from ip discovery
1309 					 * and only store the lower 32-bit ip base in reg_offset[].
1310 					 * Bits above 32 follow an ASIC-specific format and are
1311 					 * simply discarded here; each ASIC handles them itself.
1312 					 * This way reg_offset[] and the related helpers can
1313 					 * stay unchanged.
1314 					 * The base address is in dwords, thus clear the
1315 					 * highest 2 bits before storing.
1316 					 */
1317 					ip->base_address[k] =
1318 						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1319 				else
1320 					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1321 				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1322 			}
1323 
1324 			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1325 				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1326 				    hw_id_map[hw_ip] != 0) {
1327 					DRM_DEBUG("set register base offset for %s\n",
1328 							hw_id_names[le16_to_cpu(ip->hw_id)]);
1329 					adev->reg_offset[hw_ip][ip->instance_number] =
1330 						ip->base_address;
1331 					/* Instance support is somewhat inconsistent.
1332 					 * SDMA is a good example.  Sienna cichlid has 4 total
1333 					 * SDMA instances, each enumerated separately (HWIDs
1334 					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
1335 					 * but they are enumerated as multiple instances of the
1336 					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
1337 					 * example.  On most chips there are multiple instances
1338 					 * with the same HWID.
1339 					 */
1340 
1341 					if (ihdr->version < 3) {
1342 						subrev = 0;
1343 						variant = 0;
1344 					} else {
1345 						subrev = ip->sub_revision;
1346 						variant = ip->variant;
1347 					}
1348 
1349 					adev->ip_versions[hw_ip]
1350 							 [ip->instance_number] =
1351 						IP_VERSION_FULL(ip->major,
1352 								ip->minor,
1353 								ip->revision,
1354 								variant,
1355 								subrev);
1356 				}
1357 			}
1358 
1359 next_ip:
1360 			if (ihdr->base_addr_64_bit)
1361 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1362 			else
1363 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
1364 		}
1365 	}
1366 
1367 	return 0;
1368 }
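/*
 * The ip_versions[] entries set above are packed with IP_VERSION_FULL().
 * Assuming the usual 8/8/8/4/4-bit major/minor/rev/variant/subrev layout
 * (check the driver headers for the authoritative macro):
 *
 *	IP_VERSION(10, 3, 2) == IP_VERSION_FULL(10, 3, 2, 0, 0) == 0x0a030200
 *
 * The packing keeps versions numerically ordered, which is why callers such
 * as amdgpu_discovery_harvest_ip() below can compare them with < directly.
 */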
1369 
1370 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1371 {
1372 	int vcn_harvest_count = 0;
1373 	int umc_harvest_count = 0;
1374 
1375 	/*
1376 	 * The harvest table does not cover Navi1x and legacy GPUs,
1377 	 * so read the harvest bit of each IP data structure there to
1378 	 * set the harvest configuration.
1379 	 */
1380 	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1381 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) {
1382 		if ((adev->pdev->device == 0x731E &&
1383 			(adev->pdev->revision == 0xC6 ||
1384 			 adev->pdev->revision == 0xC7)) ||
1385 			(adev->pdev->device == 0x7340 &&
1386 			 adev->pdev->revision == 0xC9) ||
1387 			(adev->pdev->device == 0x7360 &&
1388 			 adev->pdev->revision == 0xC7))
1389 			amdgpu_discovery_read_harvest_bit_per_ip(adev,
1390 				&vcn_harvest_count);
1391 	} else {
1392 		amdgpu_discovery_read_from_harvest_table(adev,
1393 							 &vcn_harvest_count,
1394 							 &umc_harvest_count);
1395 	}
1396 
1397 	amdgpu_discovery_harvest_config_quirk(adev);
1398 
1399 	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1400 		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1401 		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1402 	}
1403 
1404 	if (umc_harvest_count < adev->gmc.num_umc) {
1405 		adev->gmc.num_umc -= umc_harvest_count;
1406 	}
1407 }
1408 
1409 union gc_info {
1410 	struct gc_info_v1_0 v1;
1411 	struct gc_info_v1_1 v1_1;
1412 	struct gc_info_v1_2 v1_2;
1413 	struct gc_info_v2_0 v2;
1414 	struct gc_info_v2_1 v2_1;
1415 };
1416 
1417 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1418 {
1419 	struct binary_header *bhdr;
1420 	union gc_info *gc_info;
1421 	u16 offset;
1422 
1423 	if (!adev->mman.discovery_bin) {
1424 		DRM_ERROR("ip discovery uninitialized\n");
1425 		return -EINVAL;
1426 	}
1427 
1428 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1429 	offset = le16_to_cpu(bhdr->table_list[GC].offset);
1430 
1431 	if (!offset)
1432 		return 0;
1433 
1434 	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1435 
1436 	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1437 	case 1:
1438 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1439 		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1440 						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1441 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1442 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1443 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1444 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1445 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1446 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1447 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1448 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1449 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1450 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1451 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1452 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1453 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1454 			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1455 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1456 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1457 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1458 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1459 			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1460 		}
1461 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1462 			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1463 			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1464 			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1465 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1466 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1467 			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1468 			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1469 			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1470 		}
1471 		break;
1472 	case 2:
1473 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1474 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1475 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1476 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1477 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1478 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1479 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1480 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1481 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1482 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1483 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1484 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1485 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1486 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1487 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1488 			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1489 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1490 		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1491 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1492 			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1493 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1494 			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1495 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1496 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1497 			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1498 		}
1499 		break;
1500 	default:
1501 		dev_err(adev->dev,
1502 			"Unhandled GC info table %d.%d\n",
1503 			le16_to_cpu(gc_info->v1.header.version_major),
1504 			le16_to_cpu(gc_info->v1.header.version_minor));
1505 		return -EINVAL;
1506 	}
1507 	return 0;
1508 }
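/*
 * Note on the v1 math above: gfx10+ hardware groups two CUs per WGP, hence
 * max_cu_per_sh = 2 * (wgp0_per_sa + wgp1_per_sa).  E.g. a table reporting
 * 4 + 4 WGPs per SA yields 16 CUs per SH/SA.
 */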
1509 
1510 union mall_info {
1511 	struct mall_info_v1_0 v1;
1512 	struct mall_info_v2_0 v2;
1513 };
1514 
1515 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1516 {
1517 	struct binary_header *bhdr;
1518 	union mall_info *mall_info;
1519 	u32 u, mall_size_per_umc, m_s_present, half_use;
1520 	u64 mall_size;
1521 	u16 offset;
1522 
1523 	if (!adev->mman.discovery_bin) {
1524 		DRM_ERROR("ip discovery uninitialized\n");
1525 		return -EINVAL;
1526 	}
1527 
1528 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1529 	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1530 
1531 	if (!offset)
1532 		return 0;
1533 
1534 	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1535 
1536 	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1537 	case 1:
1538 		mall_size = 0;
1539 		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1540 		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1541 		half_use = le32_to_cpu(mall_info->v1.m_half_use);
1542 		for (u = 0; u < adev->gmc.num_umc; u++) {
1543 			if (m_s_present & (1 << u))
1544 				mall_size += mall_size_per_umc * 2;
1545 			else if (half_use & (1 << u))
1546 				mall_size += mall_size_per_umc / 2;
1547 			else
1548 				mall_size += mall_size_per_umc;
1549 		}
1550 		adev->gmc.mall_size = mall_size;
1551 		adev->gmc.m_half_use = half_use;
1552 		break;
1553 	case 2:
1554 		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1555 		adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
1556 		break;
1557 	default:
1558 		dev_err(adev->dev,
1559 			"Unhandled MALL info table %d.%d\n",
1560 			le16_to_cpu(mall_info->v1.header.version_major),
1561 			le16_to_cpu(mall_info->v1.header.version_minor));
1562 		return -EINVAL;
1563 	}
1564 	return 0;
1565 }
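/*
 * MALL sizing example for the v1 path (illustrative numbers): with
 * num_umc = 4, mall_size_per_umc = 2 MiB, m_s_present = 0b0001 and
 * m_half_use = 0b1000,
 *
 *	umc 0: 2 MiB * 2 = 4 MiB	// sample size present
 *	umc 1: 2 MiB
 *	umc 2: 2 MiB
 *	umc 3: 2 MiB / 2 = 1 MiB	// half use
 *
 * for a total mall_size of 9 MiB.
 */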
1566 
1567 union vcn_info {
1568 	struct vcn_info_v1_0 v1;
1569 };
1570 
1571 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1572 {
1573 	struct binary_header *bhdr;
1574 	union vcn_info *vcn_info;
1575 	u16 offset;
1576 	int v;
1577 
1578 	if (!adev->mman.discovery_bin) {
1579 		DRM_ERROR("ip discovery uninitialized\n");
1580 		return -EINVAL;
1581 	}
1582 
1583 	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES,
1584 	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES,
1585 	 * but that may change with future GPUs, so keep this
1586 	 * check for defensive purposes.
1587 	 */
1588 	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1589 		dev_err(adev->dev, "invalid vcn instances\n");
1590 		return -EINVAL;
1591 	}
1592 
1593 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1594 	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1595 
1596 	if (!offset)
1597 		return 0;
1598 
1599 	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1600 
1601 	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1602 	case 1:
1603 		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1604 		 * so this won't overflow.
1605 		 */
1606 		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1607 			adev->vcn.vcn_codec_disable_mask[v] =
1608 				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1609 		}
1610 		break;
1611 	default:
1612 		dev_err(adev->dev,
1613 			"Unhandled VCN info table %d.%d\n",
1614 			le16_to_cpu(vcn_info->v1.header.version_major),
1615 			le16_to_cpu(vcn_info->v1.header.version_minor));
1616 		return -EINVAL;
1617 	}
1618 	return 0;
1619 }
1620 
1621 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1622 {
1623 	/* what IP to use for this? */
1624 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1625 	case IP_VERSION(9, 0, 1):
1626 	case IP_VERSION(9, 1, 0):
1627 	case IP_VERSION(9, 2, 1):
1628 	case IP_VERSION(9, 2, 2):
1629 	case IP_VERSION(9, 3, 0):
1630 	case IP_VERSION(9, 4, 0):
1631 	case IP_VERSION(9, 4, 1):
1632 	case IP_VERSION(9, 4, 2):
1633 	case IP_VERSION(9, 4, 3):
1634 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1635 		break;
1636 	case IP_VERSION(10, 1, 10):
1637 	case IP_VERSION(10, 1, 1):
1638 	case IP_VERSION(10, 1, 2):
1639 	case IP_VERSION(10, 1, 3):
1640 	case IP_VERSION(10, 1, 4):
1641 	case IP_VERSION(10, 3, 0):
1642 	case IP_VERSION(10, 3, 1):
1643 	case IP_VERSION(10, 3, 2):
1644 	case IP_VERSION(10, 3, 3):
1645 	case IP_VERSION(10, 3, 4):
1646 	case IP_VERSION(10, 3, 5):
1647 	case IP_VERSION(10, 3, 6):
1648 	case IP_VERSION(10, 3, 7):
1649 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1650 		break;
1651 	case IP_VERSION(11, 0, 0):
1652 	case IP_VERSION(11, 0, 1):
1653 	case IP_VERSION(11, 0, 2):
1654 	case IP_VERSION(11, 0, 3):
1655 	case IP_VERSION(11, 0, 4):
1656 	case IP_VERSION(11, 5, 0):
1657 		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1658 		break;
1659 	default:
1660 		dev_err(adev->dev,
1661 			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1662 			amdgpu_ip_version(adev, GC_HWIP, 0));
1663 		return -EINVAL;
1664 	}
1665 	return 0;
1666 }
1667 
1668 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1669 {
1670 	/* use GC or MMHUB IP version */
1671 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1672 	case IP_VERSION(9, 0, 1):
1673 	case IP_VERSION(9, 1, 0):
1674 	case IP_VERSION(9, 2, 1):
1675 	case IP_VERSION(9, 2, 2):
1676 	case IP_VERSION(9, 3, 0):
1677 	case IP_VERSION(9, 4, 0):
1678 	case IP_VERSION(9, 4, 1):
1679 	case IP_VERSION(9, 4, 2):
1680 	case IP_VERSION(9, 4, 3):
1681 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1682 		break;
1683 	case IP_VERSION(10, 1, 10):
1684 	case IP_VERSION(10, 1, 1):
1685 	case IP_VERSION(10, 1, 2):
1686 	case IP_VERSION(10, 1, 3):
1687 	case IP_VERSION(10, 1, 4):
1688 	case IP_VERSION(10, 3, 0):
1689 	case IP_VERSION(10, 3, 1):
1690 	case IP_VERSION(10, 3, 2):
1691 	case IP_VERSION(10, 3, 3):
1692 	case IP_VERSION(10, 3, 4):
1693 	case IP_VERSION(10, 3, 5):
1694 	case IP_VERSION(10, 3, 6):
1695 	case IP_VERSION(10, 3, 7):
1696 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1697 		break;
1698 	case IP_VERSION(11, 0, 0):
1699 	case IP_VERSION(11, 0, 1):
1700 	case IP_VERSION(11, 0, 2):
1701 	case IP_VERSION(11, 0, 3):
1702 	case IP_VERSION(11, 0, 4):
1703 	case IP_VERSION(11, 5, 0):
1704 		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1705 		break;
1706 	default:
1707 		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1708 			amdgpu_ip_version(adev, GC_HWIP, 0));
1709 		return -EINVAL;
1710 	}
1711 	return 0;
1712 }
1713 
1714 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1715 {
1716 	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
1717 	case IP_VERSION(4, 0, 0):
1718 	case IP_VERSION(4, 0, 1):
1719 	case IP_VERSION(4, 1, 0):
1720 	case IP_VERSION(4, 1, 1):
1721 	case IP_VERSION(4, 3, 0):
1722 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1723 		break;
1724 	case IP_VERSION(4, 2, 0):
1725 	case IP_VERSION(4, 2, 1):
1726 	case IP_VERSION(4, 4, 0):
1727 	case IP_VERSION(4, 4, 2):
1728 		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1729 		break;
1730 	case IP_VERSION(5, 0, 0):
1731 	case IP_VERSION(5, 0, 1):
1732 	case IP_VERSION(5, 0, 2):
1733 	case IP_VERSION(5, 0, 3):
1734 	case IP_VERSION(5, 2, 0):
1735 	case IP_VERSION(5, 2, 1):
1736 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1737 		break;
1738 	case IP_VERSION(6, 0, 0):
1739 	case IP_VERSION(6, 0, 1):
1740 	case IP_VERSION(6, 0, 2):
1741 		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1742 		break;
1743 	case IP_VERSION(6, 1, 0):
1744 		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1745 		break;
1746 	default:
1747 		dev_err(adev->dev,
1748 			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1749 			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
1750 		return -EINVAL;
1751 	}
1752 	return 0;
1753 }
1754 
1755 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1756 {
1757 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1758 	case IP_VERSION(9, 0, 0):
1759 		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1760 		break;
1761 	case IP_VERSION(10, 0, 0):
1762 	case IP_VERSION(10, 0, 1):
1763 		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1764 		break;
1765 	case IP_VERSION(11, 0, 0):
1766 	case IP_VERSION(11, 0, 2):
1767 	case IP_VERSION(11, 0, 4):
1768 	case IP_VERSION(11, 0, 5):
1769 	case IP_VERSION(11, 0, 9):
1770 	case IP_VERSION(11, 0, 7):
1771 	case IP_VERSION(11, 0, 11):
1772 	case IP_VERSION(11, 0, 12):
1773 	case IP_VERSION(11, 0, 13):
1774 	case IP_VERSION(11, 5, 0):
1775 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1776 		break;
1777 	case IP_VERSION(11, 0, 8):
1778 		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
1779 		break;
1780 	case IP_VERSION(11, 0, 3):
1781 	case IP_VERSION(12, 0, 1):
1782 		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
1783 		break;
1784 	case IP_VERSION(13, 0, 0):
1785 	case IP_VERSION(13, 0, 1):
1786 	case IP_VERSION(13, 0, 2):
1787 	case IP_VERSION(13, 0, 3):
1788 	case IP_VERSION(13, 0, 5):
1789 	case IP_VERSION(13, 0, 6):
1790 	case IP_VERSION(13, 0, 7):
1791 	case IP_VERSION(13, 0, 8):
1792 	case IP_VERSION(13, 0, 10):
1793 	case IP_VERSION(13, 0, 11):
1794 	case IP_VERSION(14, 0, 0):
1795 		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
1796 		break;
1797 	case IP_VERSION(13, 0, 4):
1798 		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
1799 		break;
1800 	default:
1801 		dev_err(adev->dev,
1802 			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
1803 			amdgpu_ip_version(adev, MP0_HWIP, 0));
1804 		return -EINVAL;
1805 	}
1806 	return 0;
1807 }
1808 
1809 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
1810 {
1811 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1812 	case IP_VERSION(9, 0, 0):
1813 	case IP_VERSION(10, 0, 0):
1814 	case IP_VERSION(10, 0, 1):
1815 	case IP_VERSION(11, 0, 2):
1816 		if (adev->asic_type == CHIP_ARCTURUS)
1817 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1818 		else
1819 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1820 		break;
1821 	case IP_VERSION(11, 0, 0):
1822 	case IP_VERSION(11, 0, 5):
1823 	case IP_VERSION(11, 0, 9):
1824 	case IP_VERSION(11, 0, 7):
1825 	case IP_VERSION(11, 0, 8):
1826 	case IP_VERSION(11, 0, 11):
1827 	case IP_VERSION(11, 0, 12):
1828 	case IP_VERSION(11, 0, 13):
1829 	case IP_VERSION(11, 5, 0):
1830 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1831 		break;
1832 	case IP_VERSION(12, 0, 0):
1833 	case IP_VERSION(12, 0, 1):
1834 		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
1835 		break;
1836 	case IP_VERSION(13, 0, 0):
1837 	case IP_VERSION(13, 0, 1):
1838 	case IP_VERSION(13, 0, 2):
1839 	case IP_VERSION(13, 0, 3):
1840 	case IP_VERSION(13, 0, 4):
1841 	case IP_VERSION(13, 0, 5):
1842 	case IP_VERSION(13, 0, 6):
1843 	case IP_VERSION(13, 0, 7):
1844 	case IP_VERSION(13, 0, 8):
1845 	case IP_VERSION(13, 0, 10):
1846 	case IP_VERSION(13, 0, 11):
1847 		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
1848 		break;
1849 	default:
1850 		dev_err(adev->dev,
1851 			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
1852 			amdgpu_ip_version(adev, MP1_HWIP, 0));
1853 		return -EINVAL;
1854 	}
1855 	return 0;
1856 }
1857 
1858 #if defined(CONFIG_DRM_AMD_DC)
1859 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
1860 {
1861 	amdgpu_device_set_sriov_virtual_display(adev);
1862 	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
1863 }
1864 #endif
1865 
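/*
 * Virtual display takes priority and uses the vkms block; otherwise DM is
 * added only when DC is supported.  DCE_HWIP carries the DCN version on
 * newer ASICs, while DCI_HWIP carries the DCE 12.x version of the Vega
 * parts; under SR-IOV both paths fall back to a virtual display.
 */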
1866 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
1867 {
1868 	if (adev->enable_virtual_display) {
1869 		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
1870 		return 0;
1871 	}
1872 
1873 	if (!amdgpu_device_has_dc_support(adev))
1874 		return 0;
1875 
1876 #if defined(CONFIG_DRM_AMD_DC)
1877 	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1878 		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1879 		case IP_VERSION(1, 0, 0):
1880 		case IP_VERSION(1, 0, 1):
1881 		case IP_VERSION(2, 0, 2):
1882 		case IP_VERSION(2, 0, 0):
1883 		case IP_VERSION(2, 0, 3):
1884 		case IP_VERSION(2, 1, 0):
1885 		case IP_VERSION(3, 0, 0):
1886 		case IP_VERSION(3, 0, 2):
1887 		case IP_VERSION(3, 0, 3):
1888 		case IP_VERSION(3, 0, 1):
1889 		case IP_VERSION(3, 1, 2):
1890 		case IP_VERSION(3, 1, 3):
1891 		case IP_VERSION(3, 1, 4):
1892 		case IP_VERSION(3, 1, 5):
1893 		case IP_VERSION(3, 1, 6):
1894 		case IP_VERSION(3, 2, 0):
1895 		case IP_VERSION(3, 2, 1):
1896 			if (amdgpu_sriov_vf(adev))
1897 				amdgpu_discovery_set_sriov_display(adev);
1898 			else
1899 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
1900 			break;
1901 		default:
1902 			dev_err(adev->dev,
1903 				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
1904 				amdgpu_ip_version(adev, DCE_HWIP, 0));
1905 			return -EINVAL;
1906 		}
1907 	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
1908 		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
1909 		case IP_VERSION(12, 0, 0):
1910 		case IP_VERSION(12, 0, 1):
1911 		case IP_VERSION(12, 1, 0):
1912 			if (amdgpu_sriov_vf(adev))
1913 				amdgpu_discovery_set_sriov_display(adev);
1914 			else
1915 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
1916 			break;
1917 		default:
1918 			dev_err(adev->dev,
1919 				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
1920 				amdgpu_ip_version(adev, DCI_HWIP, 0));
1921 			return -EINVAL;
1922 		}
1923 	}
1924 #endif
1925 	return 0;
1926 }
1927 
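/* GFX block selection; GC 9.4.3 is gated behind amdgpu_exp_hw_support. */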
1928 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
1929 {
1930 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1931 	case IP_VERSION(9, 0, 1):
1932 	case IP_VERSION(9, 1, 0):
1933 	case IP_VERSION(9, 2, 1):
1934 	case IP_VERSION(9, 2, 2):
1935 	case IP_VERSION(9, 3, 0):
1936 	case IP_VERSION(9, 4, 0):
1937 	case IP_VERSION(9, 4, 1):
1938 	case IP_VERSION(9, 4, 2):
1939 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
1940 		break;
1941 	case IP_VERSION(9, 4, 3):
1942 		if (!amdgpu_exp_hw_support)
1943 			return -EINVAL;
1944 		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
1945 		break;
1946 	case IP_VERSION(10, 1, 10):
1947 	case IP_VERSION(10, 1, 2):
1948 	case IP_VERSION(10, 1, 1):
1949 	case IP_VERSION(10, 1, 3):
1950 	case IP_VERSION(10, 1, 4):
1951 	case IP_VERSION(10, 3, 0):
1952 	case IP_VERSION(10, 3, 2):
1953 	case IP_VERSION(10, 3, 1):
1954 	case IP_VERSION(10, 3, 4):
1955 	case IP_VERSION(10, 3, 5):
1956 	case IP_VERSION(10, 3, 6):
1957 	case IP_VERSION(10, 3, 3):
1958 	case IP_VERSION(10, 3, 7):
1959 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
1960 		break;
1961 	case IP_VERSION(11, 0, 0):
1962 	case IP_VERSION(11, 0, 1):
1963 	case IP_VERSION(11, 0, 2):
1964 	case IP_VERSION(11, 0, 3):
1965 	case IP_VERSION(11, 0, 4):
1966 	case IP_VERSION(11, 5, 0):
1967 		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
1968 		break;
1969 	default:
1970 		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
1971 			amdgpu_ip_version(adev, GC_HWIP, 0));
1972 		return -EINVAL;
1973 	}
1974 	return 0;
1975 }
1976 
1977 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
1978 {
1979 	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1980 	case IP_VERSION(4, 0, 0):
1981 	case IP_VERSION(4, 0, 1):
1982 	case IP_VERSION(4, 1, 0):
1983 	case IP_VERSION(4, 1, 1):
1984 	case IP_VERSION(4, 1, 2):
1985 	case IP_VERSION(4, 2, 0):
1986 	case IP_VERSION(4, 2, 2):
1987 	case IP_VERSION(4, 4, 0):
1988 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
1989 		break;
1990 	case IP_VERSION(4, 4, 2):
1991 		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
1992 		break;
1993 	case IP_VERSION(5, 0, 0):
1994 	case IP_VERSION(5, 0, 1):
1995 	case IP_VERSION(5, 0, 2):
1996 	case IP_VERSION(5, 0, 5):
1997 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
1998 		break;
1999 	case IP_VERSION(5, 2, 0):
2000 	case IP_VERSION(5, 2, 2):
2001 	case IP_VERSION(5, 2, 4):
2002 	case IP_VERSION(5, 2, 5):
2003 	case IP_VERSION(5, 2, 6):
2004 	case IP_VERSION(5, 2, 3):
2005 	case IP_VERSION(5, 2, 1):
2006 	case IP_VERSION(5, 2, 7):
2007 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2008 		break;
2009 	case IP_VERSION(6, 0, 0):
2010 	case IP_VERSION(6, 0, 1):
2011 	case IP_VERSION(6, 0, 2):
2012 	case IP_VERSION(6, 0, 3):
2013 	case IP_VERSION(6, 1, 0):
2014 		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2015 		break;
2016 	default:
2017 		dev_err(adev->dev,
2018 			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2019 			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2020 		return -EINVAL;
2021 	}
2022 	return 0;
2023 }
2024 
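/*
 * Multimedia engines: ASICs that expose a VCE IP use the legacy UVD/VCE
 * blocks (skipped on Vega20 SR-IOV), everything else uses VCN plus JPEG.
 * Note that VCN 2.0.3 gets no media block at all and VCN 3.0.33 gets VCN
 * without JPEG.
 */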
2025 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2026 {
2027 	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2028 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2029 		case IP_VERSION(7, 0, 0):
2030 		case IP_VERSION(7, 2, 0):
2031 			/* UVD is not supported on vega20 SR-IOV */
2032 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2033 				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2034 			break;
2035 		default:
2036 			dev_err(adev->dev,
2037 				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2038 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2039 			return -EINVAL;
2040 		}
2041 		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2042 		case IP_VERSION(4, 0, 0):
2043 		case IP_VERSION(4, 1, 0):
2044 			/* VCE is not supported on vega20 SR-IOV */
2045 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2046 				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2047 			break;
2048 		default:
2049 			dev_err(adev->dev,
2050 				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2051 				amdgpu_ip_version(adev, VCE_HWIP, 0));
2052 			return -EINVAL;
2053 		}
2054 	} else {
2055 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2056 		case IP_VERSION(1, 0, 0):
2057 		case IP_VERSION(1, 0, 1):
2058 			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2059 			break;
2060 		case IP_VERSION(2, 0, 0):
2061 		case IP_VERSION(2, 0, 2):
2062 		case IP_VERSION(2, 2, 0):
2063 			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2064 			if (!amdgpu_sriov_vf(adev))
2065 				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2066 			break;
2067 		case IP_VERSION(2, 0, 3):
2068 			break;
2069 		case IP_VERSION(2, 5, 0):
2070 			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2071 			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2072 			break;
2073 		case IP_VERSION(2, 6, 0):
2074 			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2075 			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2076 			break;
2077 		case IP_VERSION(3, 0, 0):
2078 		case IP_VERSION(3, 0, 16):
2079 		case IP_VERSION(3, 1, 1):
2080 		case IP_VERSION(3, 1, 2):
2081 		case IP_VERSION(3, 0, 2):
2082 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2083 			if (!amdgpu_sriov_vf(adev))
2084 				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2085 			break;
2086 		case IP_VERSION(3, 0, 33):
2087 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2088 			break;
2089 		case IP_VERSION(4, 0, 0):
2090 		case IP_VERSION(4, 0, 2):
2091 		case IP_VERSION(4, 0, 4):
2092 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2093 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2094 			break;
2095 		case IP_VERSION(4, 0, 3):
2096 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2097 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2098 			break;
2099 		case IP_VERSION(4, 0, 5):
2100 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2101 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2102 			break;
2103 		default:
2104 			dev_err(adev->dev,
2105 				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2106 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2107 			return -EINVAL;
2108 		}
2109 	}
2110 	return 0;
2111 }
2112 
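/*
 * MES is optional on GC 10.x (amdgpu_mes module parameter, with the KIQ
 * additionally gated by amdgpu_mes_kiq) and always enabled on the GC 11.x
 * parts listed below.
 */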
2113 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2114 {
2115 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2116 	case IP_VERSION(10, 1, 10):
2117 	case IP_VERSION(10, 1, 1):
2118 	case IP_VERSION(10, 1, 2):
2119 	case IP_VERSION(10, 1, 3):
2120 	case IP_VERSION(10, 1, 4):
2121 	case IP_VERSION(10, 3, 0):
2122 	case IP_VERSION(10, 3, 1):
2123 	case IP_VERSION(10, 3, 2):
2124 	case IP_VERSION(10, 3, 3):
2125 	case IP_VERSION(10, 3, 4):
2126 	case IP_VERSION(10, 3, 5):
2127 	case IP_VERSION(10, 3, 6):
2128 		if (amdgpu_mes) {
2129 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
2130 			adev->enable_mes = true;
2131 			if (amdgpu_mes_kiq)
2132 				adev->enable_mes_kiq = true;
2133 		}
2134 		break;
2135 	case IP_VERSION(11, 0, 0):
2136 	case IP_VERSION(11, 0, 1):
2137 	case IP_VERSION(11, 0, 2):
2138 	case IP_VERSION(11, 0, 3):
2139 	case IP_VERSION(11, 0, 4):
2140 	case IP_VERSION(11, 5, 0):
2141 		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2142 		adev->enable_mes = true;
2143 		adev->enable_mes_kiq = true;
2144 		break;
2145 	default:
2146 		break;
2147 	}
2148 	return 0;
2149 }
2150 
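/* per-SoC configuration; currently only GC 9.4.3 (aqua vanjaram) needs it */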
2151 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2152 {
2153 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2154 	case IP_VERSION(9, 4, 3):
2155 		aqua_vanjaram_init_soc_config(adev);
2156 		break;
2157 	default:
2158 		break;
2159 	}
2160 }
2161 
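/* VPE is only handled as IP v6.1.0 here; other versions are silently skipped */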
2162 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2163 {
2164 	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2165 	case IP_VERSION(6, 1, 0):
2166 		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2167 		break;
2168 	default:
2169 		break;
2170 	}
2171 
2172 	return 0;
2173 }
2174 
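/*
 * UMSCH (multimedia user mode scheduler) is opt-in: bit 0 of the
 * amdgpu_umsch_mm module parameter enables it on VCN 4.0.5.
 */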
2175 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2176 {
2177 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2178 	case IP_VERSION(4, 0, 5):
2179 		if (amdgpu_umsch_mm & 0x1) {
2180 			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2181 			adev->enable_umsch_mm = true;
2182 		}
2183 		break;
2184 	default:
2185 		break;
2186 	}
2187 
2188 	return 0;
2189 }
2190 
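/*
 * amdgpu_discovery_set_ip_blocks - pick every IP block for this device.
 *
 * ASICs that predate IP discovery (Vega10/12/20, Raven, Arcturus,
 * Aldebaran) get their IP versions hardcoded here; everything else parses
 * them from the discovery binary, along with harvesting, GFX, MALL and
 * VCN info.  The per-IP helpers above then register the actual blocks.
 */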
2191 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2192 {
2193 	int r;
2194 
2195 	switch (adev->asic_type) {
2196 	case CHIP_VEGA10:
2197 		vega10_reg_base_init(adev);
2198 		adev->sdma.num_instances = 2;
2199 		adev->gmc.num_umc = 4;
2200 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2201 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2202 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2203 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2204 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2205 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2206 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2207 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2208 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2209 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2210 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2211 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2212 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2213 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2214 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2215 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2216 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2217 		break;
2218 	case CHIP_VEGA12:
2219 		vega10_reg_base_init(adev);
2220 		adev->sdma.num_instances = 2;
2221 		adev->gmc.num_umc = 4;
2222 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2223 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2224 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2225 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2226 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2227 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2228 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2229 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2230 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2231 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2232 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2233 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2234 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2235 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2236 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2237 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2238 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2239 		break;
2240 	case CHIP_RAVEN:
2241 		vega10_reg_base_init(adev);
2242 		adev->sdma.num_instances = 1;
2243 		adev->vcn.num_vcn_inst = 1;
2244 		adev->gmc.num_umc = 2;
2245 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2246 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2247 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2248 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2249 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2250 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2251 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2252 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2253 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2254 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2255 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2256 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2257 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2258 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2259 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2260 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2261 		} else {
2262 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2263 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2264 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2265 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2266 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2267 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2268 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2269 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2270 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2271 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2272 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2273 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2274 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2275 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2276 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2277 		}
2278 		break;
2279 	case CHIP_VEGA20:
2280 		vega20_reg_base_init(adev);
2281 		adev->sdma.num_instances = 2;
2282 		adev->gmc.num_umc = 8;
2283 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2284 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2285 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2286 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2287 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2288 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2289 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2290 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2291 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2292 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2293 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2294 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2295 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2296 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2297 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2298 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2299 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2300 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2301 		break;
2302 	case CHIP_ARCTURUS:
2303 		arct_reg_base_init(adev);
2304 		adev->sdma.num_instances = 8;
2305 		adev->vcn.num_vcn_inst = 2;
2306 		adev->gmc.num_umc = 8;
2307 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2308 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2309 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2310 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2311 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2312 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2313 		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2314 		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2315 		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2316 		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2317 		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2318 		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2319 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2320 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2321 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2322 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2323 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2324 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2325 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2326 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2327 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2328 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2329 		break;
2330 	case CHIP_ALDEBARAN:
2331 		aldebaran_reg_base_init(adev);
2332 		adev->sdma.num_instances = 5;
2333 		adev->vcn.num_vcn_inst = 2;
2334 		adev->gmc.num_umc = 4;
2335 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2336 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2337 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2338 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2339 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2340 		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2341 		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2342 		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2343 		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2344 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2345 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2346 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2347 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2348 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2349 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2350 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2351 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2352 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2353 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2354 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2355 		break;
2356 	default:
2357 		r = amdgpu_discovery_reg_base_init(adev);
2358 		if (r)
2359 			return -EINVAL;
2360 
2361 		amdgpu_discovery_harvest_ip(adev);
2362 		amdgpu_discovery_get_gfx_info(adev);
2363 		amdgpu_discovery_get_mall_info(adev);
2364 		amdgpu_discovery_get_vcn_info(adev);
2365 		break;
2366 	}
2367 
2368 	amdgpu_discovery_init_soc_config(adev);
2369 	amdgpu_discovery_sysfs_init(adev);
2370 
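	/* the GC IP version determines the chip family */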
2371 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2372 	case IP_VERSION(9, 0, 1):
2373 	case IP_VERSION(9, 2, 1):
2374 	case IP_VERSION(9, 4, 0):
2375 	case IP_VERSION(9, 4, 1):
2376 	case IP_VERSION(9, 4, 2):
2377 	case IP_VERSION(9, 4, 3):
2378 		adev->family = AMDGPU_FAMILY_AI;
2379 		break;
2380 	case IP_VERSION(9, 1, 0):
2381 	case IP_VERSION(9, 2, 2):
2382 	case IP_VERSION(9, 3, 0):
2383 		adev->family = AMDGPU_FAMILY_RV;
2384 		break;
2385 	case IP_VERSION(10, 1, 10):
2386 	case IP_VERSION(10, 1, 1):
2387 	case IP_VERSION(10, 1, 2):
2388 	case IP_VERSION(10, 1, 3):
2389 	case IP_VERSION(10, 1, 4):
2390 	case IP_VERSION(10, 3, 0):
2391 	case IP_VERSION(10, 3, 2):
2392 	case IP_VERSION(10, 3, 4):
2393 	case IP_VERSION(10, 3, 5):
2394 		adev->family = AMDGPU_FAMILY_NV;
2395 		break;
2396 	case IP_VERSION(10, 3, 1):
2397 		adev->family = AMDGPU_FAMILY_VGH;
2398 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2399 		break;
2400 	case IP_VERSION(10, 3, 3):
2401 		adev->family = AMDGPU_FAMILY_YC;
2402 		break;
2403 	case IP_VERSION(10, 3, 6):
2404 		adev->family = AMDGPU_FAMILY_GC_10_3_6;
2405 		break;
2406 	case IP_VERSION(10, 3, 7):
2407 		adev->family = AMDGPU_FAMILY_GC_10_3_7;
2408 		break;
2409 	case IP_VERSION(11, 0, 0):
2410 	case IP_VERSION(11, 0, 2):
2411 	case IP_VERSION(11, 0, 3):
2412 		adev->family = AMDGPU_FAMILY_GC_11_0_0;
2413 		break;
2414 	case IP_VERSION(11, 0, 1):
2415 	case IP_VERSION(11, 0, 4):
2416 		adev->family = AMDGPU_FAMILY_GC_11_0_1;
2417 		break;
2418 	case IP_VERSION(11, 5, 0):
2419 		adev->family = AMDGPU_FAMILY_GC_11_5_0;
2420 		break;
2421 	default:
2422 		return -EINVAL;
2423 	}
2424 
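	/* flag the integrated (APU) parts based on their GC IP version */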
2425 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2426 	case IP_VERSION(9, 1, 0):
2427 	case IP_VERSION(9, 2, 2):
2428 	case IP_VERSION(9, 3, 0):
2429 	case IP_VERSION(10, 1, 3):
2430 	case IP_VERSION(10, 1, 4):
2431 	case IP_VERSION(10, 3, 1):
2432 	case IP_VERSION(10, 3, 3):
2433 	case IP_VERSION(10, 3, 6):
2434 	case IP_VERSION(10, 3, 7):
2435 	case IP_VERSION(11, 0, 1):
2436 	case IP_VERSION(11, 0, 4):
2437 	case IP_VERSION(11, 5, 0):
2438 		adev->flags |= AMD_IS_APU;
2439 		break;
2440 	default:
2441 		break;
2442 	}
2443 
2444 	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
2445 		adev->gmc.xgmi.supported = true;
2446 
2447 	/* set NBIO callbacks based on the NBIO IP version */
2448 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2449 	case IP_VERSION(6, 1, 0):
2450 	case IP_VERSION(6, 2, 0):
2451 		adev->nbio.funcs = &nbio_v6_1_funcs;
2452 		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2453 		break;
2454 	case IP_VERSION(7, 0, 0):
2455 	case IP_VERSION(7, 0, 1):
2456 	case IP_VERSION(2, 5, 0):
2457 		adev->nbio.funcs = &nbio_v7_0_funcs;
2458 		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2459 		break;
2460 	case IP_VERSION(7, 4, 0):
2461 	case IP_VERSION(7, 4, 1):
2462 	case IP_VERSION(7, 4, 4):
2463 		adev->nbio.funcs = &nbio_v7_4_funcs;
2464 		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2465 		break;
2466 	case IP_VERSION(7, 9, 0):
2467 		adev->nbio.funcs = &nbio_v7_9_funcs;
2468 		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2469 		break;
2470 	case IP_VERSION(7, 11, 0):
2471 		adev->nbio.funcs = &nbio_v7_11_funcs;
2472 		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2473 		break;
2474 	case IP_VERSION(7, 2, 0):
2475 	case IP_VERSION(7, 2, 1):
2476 	case IP_VERSION(7, 3, 0):
2477 	case IP_VERSION(7, 5, 0):
2478 	case IP_VERSION(7, 5, 1):
2479 		adev->nbio.funcs = &nbio_v7_2_funcs;
2480 		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2481 		break;
2482 	case IP_VERSION(2, 1, 1):
2483 	case IP_VERSION(2, 3, 0):
2484 	case IP_VERSION(2, 3, 1):
2485 	case IP_VERSION(2, 3, 2):
2486 	case IP_VERSION(3, 3, 0):
2487 	case IP_VERSION(3, 3, 1):
2488 	case IP_VERSION(3, 3, 2):
2489 	case IP_VERSION(3, 3, 3):
2490 		adev->nbio.funcs = &nbio_v2_3_funcs;
2491 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2492 		break;
2493 	case IP_VERSION(4, 3, 0):
2494 	case IP_VERSION(4, 3, 1):
2495 		if (amdgpu_sriov_vf(adev))
2496 			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2497 		else
2498 			adev->nbio.funcs = &nbio_v4_3_funcs;
2499 		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2500 		break;
2501 	case IP_VERSION(7, 7, 0):
2502 	case IP_VERSION(7, 7, 1):
2503 		adev->nbio.funcs = &nbio_v7_7_funcs;
2504 		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2505 		break;
2506 	default:
2507 		break;
2508 	}
2509 
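	/* set HDP callbacks */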
2510 	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2511 	case IP_VERSION(4, 0, 0):
2512 	case IP_VERSION(4, 0, 1):
2513 	case IP_VERSION(4, 1, 0):
2514 	case IP_VERSION(4, 1, 1):
2515 	case IP_VERSION(4, 1, 2):
2516 	case IP_VERSION(4, 2, 0):
2517 	case IP_VERSION(4, 2, 1):
2518 	case IP_VERSION(4, 4, 0):
2519 	case IP_VERSION(4, 4, 2):
2520 		adev->hdp.funcs = &hdp_v4_0_funcs;
2521 		break;
2522 	case IP_VERSION(5, 0, 0):
2523 	case IP_VERSION(5, 0, 1):
2524 	case IP_VERSION(5, 0, 2):
2525 	case IP_VERSION(5, 0, 3):
2526 	case IP_VERSION(5, 0, 4):
2527 	case IP_VERSION(5, 2, 0):
2528 		adev->hdp.funcs = &hdp_v5_0_funcs;
2529 		break;
2530 	case IP_VERSION(5, 2, 1):
2531 		adev->hdp.funcs = &hdp_v5_2_funcs;
2532 		break;
2533 	case IP_VERSION(6, 0, 0):
2534 	case IP_VERSION(6, 0, 1):
2535 	case IP_VERSION(6, 1, 0):
2536 		adev->hdp.funcs = &hdp_v6_0_funcs;
2537 		break;
2538 	default:
2539 		break;
2540 	}
2541 
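	/* set DF callbacks */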
2542 	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
2543 	case IP_VERSION(3, 6, 0):
2544 	case IP_VERSION(3, 6, 1):
2545 	case IP_VERSION(3, 6, 2):
2546 		adev->df.funcs = &df_v3_6_funcs;
2547 		break;
2548 	case IP_VERSION(2, 1, 0):
2549 	case IP_VERSION(2, 1, 1):
2550 	case IP_VERSION(2, 5, 0):
2551 	case IP_VERSION(3, 5, 1):
2552 	case IP_VERSION(3, 5, 2):
2553 		adev->df.funcs = &df_v1_7_funcs;
2554 		break;
2555 	case IP_VERSION(4, 3, 0):
2556 		adev->df.funcs = &df_v4_3_funcs;
2557 		break;
2558 	default:
2559 		break;
2560 	}
2561 
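	/* set SMUIO callbacks */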
2562 	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
2563 	case IP_VERSION(9, 0, 0):
2564 	case IP_VERSION(9, 0, 1):
2565 	case IP_VERSION(10, 0, 0):
2566 	case IP_VERSION(10, 0, 1):
2567 	case IP_VERSION(10, 0, 2):
2568 		adev->smuio.funcs = &smuio_v9_0_funcs;
2569 		break;
2570 	case IP_VERSION(11, 0, 0):
2571 	case IP_VERSION(11, 0, 2):
2572 	case IP_VERSION(11, 0, 3):
2573 	case IP_VERSION(11, 0, 4):
2574 	case IP_VERSION(11, 0, 7):
2575 	case IP_VERSION(11, 0, 8):
2576 		adev->smuio.funcs = &smuio_v11_0_funcs;
2577 		break;
2578 	case IP_VERSION(11, 0, 6):
2579 	case IP_VERSION(11, 0, 10):
2580 	case IP_VERSION(11, 0, 11):
2581 	case IP_VERSION(11, 5, 0):
2582 	case IP_VERSION(13, 0, 1):
2583 	case IP_VERSION(13, 0, 9):
2584 	case IP_VERSION(13, 0, 10):
2585 		adev->smuio.funcs = &smuio_v11_0_6_funcs;
2586 		break;
2587 	case IP_VERSION(13, 0, 2):
2588 		adev->smuio.funcs = &smuio_v13_0_funcs;
2589 		break;
2590 	case IP_VERSION(13, 0, 3):
2591 		adev->smuio.funcs = &smuio_v13_0_3_funcs;
2592 		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
2593 			adev->flags |= AMD_IS_APU;
2594 		}
2595 		break;
2596 	case IP_VERSION(13, 0, 6):
2597 	case IP_VERSION(13, 0, 8):
2598 	case IP_VERSION(14, 0, 0):
2599 		adev->smuio.funcs = &smuio_v13_0_6_funcs;
2600 		break;
2601 	default:
2602 		break;
2603 	}
2604 
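	/* set LSDMA callbacks */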
2605 	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
2606 	case IP_VERSION(6, 0, 0):
2607 	case IP_VERSION(6, 0, 1):
2608 	case IP_VERSION(6, 0, 2):
2609 	case IP_VERSION(6, 0, 3):
2610 		adev->lsdma.funcs = &lsdma_v6_0_funcs;
2611 		break;
2612 	default:
2613 		break;
2614 	}
2615 
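	/*
	 * Register the IP blocks; the order they are added here is the
	 * order in which they are later initialized.
	 */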
2616 	r = amdgpu_discovery_set_common_ip_blocks(adev);
2617 	if (r)
2618 		return r;
2619 
2620 	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
2621 	if (r)
2622 		return r;
2623 
2624 	/* For SR-IOV, PSP needs to be initialized before IH */
2625 	if (amdgpu_sriov_vf(adev)) {
2626 		r = amdgpu_discovery_set_psp_ip_blocks(adev);
2627 		if (r)
2628 			return r;
2629 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2630 		if (r)
2631 			return r;
2632 	} else {
2633 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2634 		if (r)
2635 			return r;
2636 
2637 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2638 			r = amdgpu_discovery_set_psp_ip_blocks(adev);
2639 			if (r)
2640 				return r;
2641 		}
2642 	}
2643 
2644 	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2645 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2646 		if (r)
2647 			return r;
2648 	}
2649 
2650 	r = amdgpu_discovery_set_display_ip_blocks(adev);
2651 	if (r)
2652 		return r;
2653 
2654 	r = amdgpu_discovery_set_gc_ip_blocks(adev);
2655 	if (r)
2656 		return r;
2657 
2658 	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
2659 	if (r)
2660 		return r;
2661 
2662 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
2663 	     !amdgpu_sriov_vf(adev)) ||
2664 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
2665 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2666 		if (r)
2667 			return r;
2668 	}
2669 
2670 	r = amdgpu_discovery_set_mm_ip_blocks(adev);
2671 	if (r)
2672 		return r;
2673 
2674 	r = amdgpu_discovery_set_mes_ip_blocks(adev);
2675 	if (r)
2676 		return r;
2677 
2678 	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
2679 	if (r)
2680 		return r;
2681 
2682 	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
2683 	if (r)
2684 		return r;
2685 
2686 	return 0;
2687 }
2688 
2689