xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1 /*
2  * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30 #include "amdgpu_ras.h"
31 
32 #include "soc15.h"
33 #include "gfx_v9_0.h"
34 #include "gfx_v9_4_3.h"
35 #include "gmc_v9_0.h"
36 #include "df_v1_7.h"
37 #include "df_v3_6.h"
38 #include "df_v4_3.h"
39 #include "df_v4_6_2.h"
40 #include "df_v4_15.h"
41 #include "nbio_v6_1.h"
42 #include "nbio_v7_0.h"
43 #include "nbio_v7_4.h"
44 #include "nbio_v7_9.h"
45 #include "nbio_v7_11.h"
46 #include "hdp_v4_0.h"
47 #include "vega10_ih.h"
48 #include "vega20_ih.h"
49 #include "sdma_v4_0.h"
50 #include "sdma_v4_4_2.h"
51 #include "uvd_v7_0.h"
52 #include "vce_v4_0.h"
53 #include "vcn_v1_0.h"
54 #include "vcn_v2_5.h"
55 #include "jpeg_v2_5.h"
56 #include "smuio_v9_0.h"
57 #include "gmc_v10_0.h"
58 #include "gmc_v11_0.h"
59 #include "gmc_v12_0.h"
60 #include "gfxhub_v2_0.h"
61 #include "mmhub_v2_0.h"
62 #include "nbio_v2_3.h"
63 #include "nbio_v4_3.h"
64 #include "nbio_v7_2.h"
65 #include "nbio_v7_7.h"
66 #include "nbif_v6_3_1.h"
67 #include "hdp_v5_0.h"
68 #include "hdp_v5_2.h"
69 #include "hdp_v6_0.h"
70 #include "hdp_v7_0.h"
71 #include "nv.h"
72 #include "soc21.h"
73 #include "soc24.h"
74 #include "navi10_ih.h"
75 #include "ih_v6_0.h"
76 #include "ih_v6_1.h"
77 #include "ih_v7_0.h"
78 #include "gfx_v10_0.h"
79 #include "gfx_v11_0.h"
80 #include "gfx_v12_0.h"
81 #include "sdma_v5_0.h"
82 #include "sdma_v5_2.h"
83 #include "sdma_v6_0.h"
84 #include "sdma_v7_0.h"
85 #include "lsdma_v6_0.h"
86 #include "lsdma_v7_0.h"
87 #include "vcn_v2_0.h"
88 #include "jpeg_v2_0.h"
89 #include "vcn_v3_0.h"
90 #include "jpeg_v3_0.h"
91 #include "vcn_v4_0.h"
92 #include "jpeg_v4_0.h"
93 #include "vcn_v4_0_3.h"
94 #include "jpeg_v4_0_3.h"
95 #include "vcn_v4_0_5.h"
96 #include "jpeg_v4_0_5.h"
97 #include "amdgpu_vkms.h"
98 #include "mes_v11_0.h"
99 #include "mes_v12_0.h"
100 #include "smuio_v11_0.h"
101 #include "smuio_v11_0_6.h"
102 #include "smuio_v13_0.h"
103 #include "smuio_v13_0_3.h"
104 #include "smuio_v13_0_6.h"
105 #include "smuio_v14_0_2.h"
106 #include "vcn_v5_0_0.h"
107 #include "vcn_v5_0_1.h"
108 #include "jpeg_v5_0_0.h"
109 #include "jpeg_v5_0_1.h"
110 
111 #include "amdgpu_vpe.h"
112 #if defined(CONFIG_DRM_AMD_ISP)
113 #include "amdgpu_isp.h"
114 #endif
115 
116 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
117 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
118 
119 #define mmIP_DISCOVERY_VERSION  0x16A00
120 #define mmRCC_CONFIG_MEMSIZE	0xde3
121 #define mmMP0_SMN_C2PMSG_33	0x16061
122 #define mmMM_INDEX		0x0
123 #define mmMM_INDEX_HI		0x6
124 #define mmMM_DATA		0x1
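/* MM_INDEX/MM_DATA are the classic index/data indirect register pair on
 * AMD GPUs; MM_INDEX_HI carries the upper bits of the index for accesses
 * beyond the 32-bit range.
 */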
125 
126 static const char *hw_id_names[HW_ID_MAX] = {
127 	[MP1_HWID]		= "MP1",
128 	[MP2_HWID]		= "MP2",
129 	[THM_HWID]		= "THM",
130 	[SMUIO_HWID]		= "SMUIO",
131 	[FUSE_HWID]		= "FUSE",
132 	[CLKA_HWID]		= "CLKA",
133 	[PWR_HWID]		= "PWR",
134 	[GC_HWID]		= "GC",
135 	[UVD_HWID]		= "UVD",
136 	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
137 	[ACP_HWID]		= "ACP",
138 	[DCI_HWID]		= "DCI",
139 	[DMU_HWID]		= "DMU",
140 	[DCO_HWID]		= "DCO",
141 	[DIO_HWID]		= "DIO",
142 	[XDMA_HWID]		= "XDMA",
143 	[DCEAZ_HWID]		= "DCEAZ",
144 	[DAZ_HWID]		= "DAZ",
145 	[SDPMUX_HWID]		= "SDPMUX",
146 	[NTB_HWID]		= "NTB",
147 	[IOHC_HWID]		= "IOHC",
148 	[L2IMU_HWID]		= "L2IMU",
149 	[VCE_HWID]		= "VCE",
150 	[MMHUB_HWID]		= "MMHUB",
151 	[ATHUB_HWID]		= "ATHUB",
152 	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
153 	[DFX_HWID]		= "DFX",
154 	[DBGU0_HWID]		= "DBGU0",
155 	[DBGU1_HWID]		= "DBGU1",
156 	[OSSSYS_HWID]		= "OSSSYS",
157 	[HDP_HWID]		= "HDP",
158 	[SDMA0_HWID]		= "SDMA0",
159 	[SDMA1_HWID]		= "SDMA1",
160 	[SDMA2_HWID]		= "SDMA2",
161 	[SDMA3_HWID]		= "SDMA3",
162 	[LSDMA_HWID]		= "LSDMA",
163 	[ISP_HWID]		= "ISP",
164 	[DBGU_IO_HWID]		= "DBGU_IO",
165 	[DF_HWID]		= "DF",
166 	[CLKB_HWID]		= "CLKB",
167 	[FCH_HWID]		= "FCH",
168 	[DFX_DAP_HWID]		= "DFX_DAP",
169 	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
170 	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
171 	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
172 	[L1IMU3_HWID]		= "L1IMU3",
173 	[L1IMU4_HWID]		= "L1IMU4",
174 	[L1IMU5_HWID]		= "L1IMU5",
175 	[L1IMU6_HWID]		= "L1IMU6",
176 	[L1IMU7_HWID]		= "L1IMU7",
177 	[L1IMU8_HWID]		= "L1IMU8",
178 	[L1IMU9_HWID]		= "L1IMU9",
179 	[L1IMU10_HWID]		= "L1IMU10",
180 	[L1IMU11_HWID]		= "L1IMU11",
181 	[L1IMU12_HWID]		= "L1IMU12",
182 	[L1IMU13_HWID]		= "L1IMU13",
183 	[L1IMU14_HWID]		= "L1IMU14",
184 	[L1IMU15_HWID]		= "L1IMU15",
185 	[WAFLC_HWID]		= "WAFLC",
186 	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
187 	[PCIE_HWID]		= "PCIE",
188 	[PCS_HWID]		= "PCS",
189 	[DDCL_HWID]		= "DDCL",
190 	[SST_HWID]		= "SST",
191 	[IOAGR_HWID]		= "IOAGR",
192 	[NBIF_HWID]		= "NBIF",
193 	[IOAPIC_HWID]		= "IOAPIC",
194 	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
195 	[NTBCCP_HWID]		= "NTBCCP",
196 	[UMC_HWID]		= "UMC",
197 	[SATA_HWID]		= "SATA",
198 	[USB_HWID]		= "USB",
199 	[CCXSEC_HWID]		= "CCXSEC",
200 	[XGMI_HWID]		= "XGMI",
201 	[XGBE_HWID]		= "XGBE",
202 	[MP0_HWID]		= "MP0",
203 	[VPE_HWID]		= "VPE",
204 };
205 
206 static int hw_id_map[MAX_HWIP] = {
207 	[GC_HWIP]	= GC_HWID,
208 	[HDP_HWIP]	= HDP_HWID,
209 	[SDMA0_HWIP]	= SDMA0_HWID,
210 	[SDMA1_HWIP]	= SDMA1_HWID,
211 	[SDMA2_HWIP]    = SDMA2_HWID,
212 	[SDMA3_HWIP]    = SDMA3_HWID,
213 	[LSDMA_HWIP]    = LSDMA_HWID,
214 	[MMHUB_HWIP]	= MMHUB_HWID,
215 	[ATHUB_HWIP]	= ATHUB_HWID,
216 	[NBIO_HWIP]	= NBIF_HWID,
217 	[MP0_HWIP]	= MP0_HWID,
218 	[MP1_HWIP]	= MP1_HWID,
219 	[UVD_HWIP]	= UVD_HWID,
220 	[VCE_HWIP]	= VCE_HWID,
221 	[DF_HWIP]	= DF_HWID,
222 	[DCE_HWIP]	= DMU_HWID,
223 	[OSSSYS_HWIP]	= OSSSYS_HWID,
224 	[SMUIO_HWIP]	= SMUIO_HWID,
225 	[PWR_HWIP]	= PWR_HWID,
226 	[NBIF_HWIP]	= NBIF_HWID,
227 	[THM_HWIP]	= THM_HWID,
228 	[CLK_HWIP]	= CLKA_HWID,
229 	[UMC_HWIP]	= UMC_HWID,
230 	[XGMI_HWIP]	= XGMI_HWID,
231 	[DCI_HWIP]	= DCI_HWID,
232 	[PCIE_HWIP]	= PCIE_HWID,
233 	[VPE_HWIP]	= VPE_HWID,
234 	[ISP_HWIP]	= ISP_HWID,
235 };
236 
237 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
238 {
239 	u64 tmr_offset, tmr_size, pos;
240 	void *discv_regn;
241 	int ret;
242 
243 	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
244 	if (ret)
245 		return ret;
246 
247 	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
248 
249 	/* This region is read-only and reserved from system use */
250 	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
251 	if (discv_regn) {
252 		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
253 		memunmap(discv_regn);
254 		return 0;
255 	}
256 
257 	return -ENOENT;
258 }
259 
260 #define IP_DISCOVERY_V2		2
261 #define IP_DISCOVERY_V4		4
262 
263 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
264 						 uint8_t *binary)
265 {
266 	uint64_t vram_size;
267 	u32 msg;
268 	int i, ret = 0;
269 
270 	if (!amdgpu_sriov_vf(adev)) {
271 		/* It can take up to a second for IFWI init to complete on some dGPUs,
272 		 * but generally it should be in the 60-100ms range.  Normally this starts
273 		 * as soon as the device gets power, so by the time the OS loads this has long
274 		 * completed.  However, when a card is hotplugged via e.g., USB4, we need to
275 		 * wait for this to complete.  Once the C2PMSG is updated, we can
276 		 * continue.
277 		 */
278 
279 		for (i = 0; i < 1000; i++) {
280 			msg = RREG32(mmMP0_SMN_C2PMSG_33);
281 			if (msg & 0x80000000)
282 				break;
283 			msleep(1);
284 		}
285 	}
286 
287 	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
288 
289 	if (vram_size) {
290 		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
291 		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
292 					  adev->mman.discovery_tmr_size, false);
293 	} else {
294 		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
295 	}
296 
297 	return ret;
298 }
299 
300 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
301 {
302 	const struct firmware *fw;
303 	const char *fw_name;
304 	int r;
305 
306 	switch (amdgpu_discovery) {
307 	case 2:
308 		fw_name = FIRMWARE_IP_DISCOVERY;
309 		break;
310 	default:
311 		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
312 		return -EINVAL;
313 	}
314 
315 	r = request_firmware(&fw, fw_name, adev->dev);
316 	if (r) {
317 		dev_err(adev->dev, "can't load firmware \"%s\"\n",
318 			fw_name);
319 		return r;
320 	}
321 
322 	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
323 	release_firmware(fw);
324 
325 	return 0;
326 }
327 
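/* The discovery tables use a simple byte-wise sum as their checksum,
 * accumulated in a u16 (i.e. truncated mod 65536).  For example, the
 * bytes {0x01, 0x02, 0xFF} sum to 0x0102.
 */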
328 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
329 {
330 	uint16_t checksum = 0;
331 	int i;
332 
333 	for (i = 0; i < size; i++)
334 		checksum += data[i];
335 
336 	return checksum;
337 }
338 
339 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
340 						    uint16_t expected)
341 {
342 	return amdgpu_discovery_calculate_checksum(data, size) == expected;
343 }
344 
345 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
346 {
347 	struct binary_header *bhdr;
348 	bhdr = (struct binary_header *)binary;
349 
350 	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
351 }
352 
353 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
354 {
355 	/*
356 	 * So far, apply this quirk only on those Navy Flounder boards whose
357 	 * harvest table carries a bad VCN config.
358 	 */
359 	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
360 	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
361 		switch (adev->pdev->revision) {
362 		case 0xC1:
363 		case 0xC2:
364 		case 0xC3:
365 		case 0xC5:
366 		case 0xC7:
367 		case 0xCF:
368 		case 0xDF:
369 			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
370 			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
371 			break;
372 		default:
373 			break;
374 		}
375 	}
376 }
377 
378 static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
379 					   struct binary_header *bhdr)
380 {
381 	struct table_info *info;
382 	uint16_t checksum;
383 	uint16_t offset;
384 
385 	info = &bhdr->table_list[NPS_INFO];
386 	offset = le16_to_cpu(info->offset);
387 	checksum = le16_to_cpu(info->checksum);
388 
389 	struct nps_info_header *nhdr =
390 		(struct nps_info_header *)(adev->mman.discovery_bin + offset);
391 
392 	if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
393 		dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
394 		return -EINVAL;
395 	}
396 
397 	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
398 					      le32_to_cpu(nhdr->size_bytes),
399 					      checksum)) {
400 		dev_dbg(adev->dev, "invalid nps info data table checksum\n");
401 		return -EINVAL;
402 	}
403 
404 	return 0;
405 }
406 
407 static int amdgpu_discovery_init(struct amdgpu_device *adev)
408 {
409 	struct table_info *info;
410 	struct binary_header *bhdr;
411 	uint16_t offset;
412 	uint16_t size;
413 	uint16_t checksum;
414 	int r;
415 
416 	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
417 	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
418 	if (!adev->mman.discovery_bin)
419 		return -ENOMEM;
420 
421 	/* Read from file if it is the preferred option */
422 	if (amdgpu_discovery == 2) {
423 		dev_info(adev->dev, "use ip discovery information from file");
424 		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
425 
426 		if (r) {
427 			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
428 			r = -EINVAL;
429 			goto out;
430 		}
431 
432 	} else {
433 		r = amdgpu_discovery_read_binary_from_mem(
434 			adev, adev->mman.discovery_bin);
435 		if (r)
436 			goto out;
437 	}
438 
439 	/* check the ip discovery binary signature */
440 	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
441 		dev_err(adev->dev,
442 			"get invalid ip discovery binary signature\n");
443 		r = -EINVAL;
444 		goto out;
445 	}
446 
447 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
448 
449 	offset = offsetof(struct binary_header, binary_checksum) +
450 		sizeof(bhdr->binary_checksum);
451 	size = le16_to_cpu(bhdr->binary_size) - offset;
452 	checksum = le16_to_cpu(bhdr->binary_checksum);
453 
454 	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
455 					      size, checksum)) {
456 		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
457 		r = -EINVAL;
458 		goto out;
459 	}
460 
461 	info = &bhdr->table_list[IP_DISCOVERY];
462 	offset = le16_to_cpu(info->offset);
463 	checksum = le16_to_cpu(info->checksum);
464 
465 	if (offset) {
466 		struct ip_discovery_header *ihdr =
467 			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
468 		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
469 			dev_err(adev->dev, "invalid ip discovery data table signature\n");
470 			r = -EINVAL;
471 			goto out;
472 		}
473 
474 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
475 						      le16_to_cpu(ihdr->size), checksum)) {
476 			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
477 			r = -EINVAL;
478 			goto out;
479 		}
480 	}
481 
482 	info = &bhdr->table_list[GC];
483 	offset = le16_to_cpu(info->offset);
484 	checksum = le16_to_cpu(info->checksum);
485 
486 	if (offset) {
487 		struct gpu_info_header *ghdr =
488 			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);
489 
490 		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
491 			dev_err(adev->dev, "invalid ip discovery gc table id\n");
492 			r = -EINVAL;
493 			goto out;
494 		}
495 
496 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
497 						      le32_to_cpu(ghdr->size), checksum)) {
498 			dev_err(adev->dev, "invalid gc data table checksum\n");
499 			r = -EINVAL;
500 			goto out;
501 		}
502 	}
503 
504 	info = &bhdr->table_list[HARVEST_INFO];
505 	offset = le16_to_cpu(info->offset);
506 	checksum = le16_to_cpu(info->checksum);
507 
508 	if (offset) {
509 		struct harvest_info_header *hhdr =
510 			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);
511 
512 		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
513 			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
514 			r = -EINVAL;
515 			goto out;
516 		}
517 
518 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
519 						      sizeof(struct harvest_table), checksum)) {
520 			dev_err(adev->dev, "invalid harvest data table checksum\n");
521 			r = -EINVAL;
522 			goto out;
523 		}
524 	}
525 
526 	info = &bhdr->table_list[VCN_INFO];
527 	offset = le16_to_cpu(info->offset);
528 	checksum = le16_to_cpu(info->checksum);
529 
530 	if (offset) {
531 		struct vcn_info_header *vhdr =
532 			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);
533 
534 		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
535 			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
536 			r = -EINVAL;
537 			goto out;
538 		}
539 
540 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
541 						      le32_to_cpu(vhdr->size_bytes), checksum)) {
542 			dev_err(adev->dev, "invalid vcn data table checksum\n");
543 			r = -EINVAL;
544 			goto out;
545 		}
546 	}
547 
548 	info = &bhdr->table_list[MALL_INFO];
549 	offset = le16_to_cpu(info->offset);
550 	checksum = le16_to_cpu(info->checksum);
551 
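	/* Note that "0 &&" short-circuits the check below, so the MALL info
	 * table id and checksum are currently never validated here.
	 */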
552 	if (0 && offset) {
553 		struct mall_info_header *mhdr =
554 			(struct mall_info_header *)(adev->mman.discovery_bin + offset);
555 
556 		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
557 			dev_err(adev->dev, "invalid ip discovery mall table id\n");
558 			r = -EINVAL;
559 			goto out;
560 		}
561 
562 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
563 						      le32_to_cpu(mhdr->size_bytes), checksum)) {
564 			dev_err(adev->dev, "invalid mall data table checksum\n");
565 			r = -EINVAL;
566 			goto out;
567 		}
568 	}
569 
570 	return 0;
571 
572 out:
573 	kfree(adev->mman.discovery_bin);
574 	adev->mman.discovery_bin = NULL;
575 	if ((amdgpu_discovery != 2) &&
576 	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
577 		amdgpu_ras_query_boot_status(adev, 4);
578 	return r;
579 }
580 
581 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
582 
583 void amdgpu_discovery_fini(struct amdgpu_device *adev)
584 {
585 	amdgpu_discovery_sysfs_fini(adev);
586 	kfree(adev->mman.discovery_bin);
587 	adev->mman.discovery_bin = NULL;
588 }
589 
590 static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
591 {
592 	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
593 		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
594 			  ip->instance_number);
595 		return -EINVAL;
596 	}
597 	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
598 		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
599 			  le16_to_cpu(ip->hw_id));
600 		return -EINVAL;
601 	}
602 
603 	return 0;
604 }
605 
606 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
607 						uint32_t *vcn_harvest_count)
608 {
609 	struct binary_header *bhdr;
610 	struct ip_discovery_header *ihdr;
611 	struct die_header *dhdr;
612 	struct ip_v4 *ip;
613 	uint16_t die_offset, ip_offset, num_dies, num_ips;
614 	int i, j;
615 
616 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
617 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
618 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
619 	num_dies = le16_to_cpu(ihdr->num_dies);
620 
621 	/* scan harvest bit of all IP data structures */
622 	for (i = 0; i < num_dies; i++) {
623 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
624 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
625 		num_ips = le16_to_cpu(dhdr->num_ips);
626 		ip_offset = die_offset + sizeof(*dhdr);
627 
628 		for (j = 0; j < num_ips; j++) {
629 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
630 
631 			if (amdgpu_discovery_validate_ip(ip))
632 				goto next_ip;
633 
634 			if (le16_to_cpu(ip->variant) == 1) {
635 				switch (le16_to_cpu(ip->hw_id)) {
636 				case VCN_HWID:
637 					(*vcn_harvest_count)++;
638 					if (ip->instance_number == 0) {
639 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
640 						adev->vcn.inst_mask &=
641 							~AMDGPU_VCN_HARVEST_VCN0;
642 						adev->jpeg.inst_mask &=
643 							~AMDGPU_VCN_HARVEST_VCN0;
644 					} else {
645 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
646 						adev->vcn.inst_mask &=
647 							~AMDGPU_VCN_HARVEST_VCN1;
648 						adev->jpeg.inst_mask &=
649 							~AMDGPU_VCN_HARVEST_VCN1;
650 					}
651 					break;
652 				case DMU_HWID:
653 					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
654 					break;
655 				default:
656 					break;
657 				}
658 			}
659 next_ip:
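			/* IP entries are variable-length; advance by the size of this
			 * entry, which depends on whether its base addresses are stored
			 * in 32-bit or 64-bit form.
			 */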
660 			if (ihdr->base_addr_64_bit)
661 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
662 			else
663 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
664 		}
665 	}
666 }
667 
668 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
669 						     uint32_t *vcn_harvest_count,
670 						     uint32_t *umc_harvest_count)
671 {
672 	struct binary_header *bhdr;
673 	struct harvest_table *harvest_info;
674 	u16 offset;
675 	int i;
676 	uint32_t umc_harvest_config = 0;
677 
678 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
679 	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
680 
681 	if (!offset) {
682 		dev_err(adev->dev, "invalid harvest table offset\n");
683 		return;
684 	}
685 
686 	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
687 
688 	for (i = 0; i < 32; i++) {
689 		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
690 			break;
691 
692 		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
693 		case VCN_HWID:
694 			(*vcn_harvest_count)++;
695 			adev->vcn.harvest_config |=
696 				(1 << harvest_info->list[i].number_instance);
697 			adev->jpeg.harvest_config |=
698 				(1 << harvest_info->list[i].number_instance);
699 
700 			adev->vcn.inst_mask &=
701 				~(1U << harvest_info->list[i].number_instance);
702 			adev->jpeg.inst_mask &=
703 				~(1U << harvest_info->list[i].number_instance);
704 			break;
705 		case DMU_HWID:
706 			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
707 			break;
708 		case UMC_HWID:
709 			umc_harvest_config |=
710 				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
711 			(*umc_harvest_count)++;
712 			break;
713 		case GC_HWID:
714 			adev->gfx.xcc_mask &=
715 				~(1U << harvest_info->list[i].number_instance);
716 			break;
717 		case SDMA0_HWID:
718 			adev->sdma.sdma_mask &=
719 				~(1U << harvest_info->list[i].number_instance);
720 			break;
721 #if defined(CONFIG_DRM_AMD_ISP)
722 		case ISP_HWID:
723 			adev->isp.harvest_config |=
724 				(1U << harvest_info->list[i].number_instance);
725 			break;
726 #endif
727 		default:
728 			break;
729 		}
730 	}
731 
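	/* Worked example: with node_inst_num = 4 and only UMC instance 2
	 * harvested, active_mask = 0b1111 & ~0b0100 = 0b1011.
	 */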
732 	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
733 				~umc_harvest_config;
734 }
735 
736 /* ================================================== */
737 
738 struct ip_hw_instance {
739 	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
740 
741 	int hw_id;
742 	u8  num_instance;
743 	u8  major, minor, revision;
744 	u8  harvest;
745 
746 	int num_base_addresses;
747 	u32 base_addr[] __counted_by(num_base_addresses);
748 };
749 
750 struct ip_hw_id {
751 	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
752 	int hw_id;
753 };
754 
755 struct ip_die_entry {
756 	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
757 	u16 num_ips;
758 };
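/* Together these kobjects/ksets back the sysfs view of the discovered
 * IPs; on a typical system (assuming card0) an attribute ends up at a
 * path like /sys/class/drm/card0/device/ip_discovery/die/0/GC/0/major.
 */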
759 
760 /* -------------------------------------------------- */
761 
762 struct ip_hw_instance_attr {
763 	struct attribute attr;
764 	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
765 };
766 
767 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
768 {
769 	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
770 }
771 
772 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
773 {
774 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
775 }
776 
777 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
778 {
779 	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
780 }
781 
782 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
783 {
784 	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
785 }
786 
787 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
788 {
789 	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
790 }
791 
792 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
793 {
794 	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
795 }
796 
797 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
798 {
799 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
800 }
801 
802 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
803 {
804 	ssize_t res, at;
805 	int ii;
806 
807 	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
808 		/* Here we satisfy the condition that at + size <= PAGE_SIZE.
809 		 */
810 		if (at + 12 > PAGE_SIZE)
811 			break;
812 		res = sysfs_emit_at(buf, at, "0x%08X\n",
813 				    ip_hw_instance->base_addr[ii]);
814 		if (res <= 0)
815 			break;
816 		at += res;
817 	}
818 
819 	return res < 0 ? res : at;
820 }
821 
822 static struct ip_hw_instance_attr ip_hw_attr[] = {
823 	__ATTR_RO(hw_id),
824 	__ATTR_RO(num_instance),
825 	__ATTR_RO(major),
826 	__ATTR_RO(minor),
827 	__ATTR_RO(revision),
828 	__ATTR_RO(harvest),
829 	__ATTR_RO(num_base_addresses),
830 	__ATTR_RO(base_addr),
831 };
832 
833 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
834 ATTRIBUTE_GROUPS(ip_hw_instance);
835 
836 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
837 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
838 
839 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
840 					struct attribute *attr,
841 					char *buf)
842 {
843 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
844 	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
845 
846 	if (!ip_hw_attr->show)
847 		return -EIO;
848 
849 	return ip_hw_attr->show(ip_hw_instance, buf);
850 }
851 
852 static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
853 	.show = ip_hw_instance_attr_show,
854 };
855 
856 static void ip_hw_instance_release(struct kobject *kobj)
857 {
858 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
859 
860 	kfree(ip_hw_instance);
861 }
862 
863 static const struct kobj_type ip_hw_instance_ktype = {
864 	.release = ip_hw_instance_release,
865 	.sysfs_ops = &ip_hw_instance_sysfs_ops,
866 	.default_groups = ip_hw_instance_groups,
867 };
868 
869 /* -------------------------------------------------- */
870 
871 #define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
872 
873 static void ip_hw_id_release(struct kobject *kobj)
874 {
875 	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
876 
877 	if (!list_empty(&ip_hw_id->hw_id_kset.list))
878 		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
879 	kfree(ip_hw_id);
880 }
881 
882 static const struct kobj_type ip_hw_id_ktype = {
883 	.release = ip_hw_id_release,
884 	.sysfs_ops = &kobj_sysfs_ops,
885 };
886 
887 /* -------------------------------------------------- */
888 
889 static void die_kobj_release(struct kobject *kobj);
890 static void ip_disc_release(struct kobject *kobj);
891 
892 struct ip_die_entry_attribute {
893 	struct attribute attr;
894 	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
895 };
896 
897 #define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)
898 
899 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
900 {
901 	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
902 }
903 
904 /* If there are more ip_die_entry attrs, other than the number of IPs,
905  * we can make this into an array of attrs, and then initialize
906  * ip_die_entry_attrs in a loop.
907  */
908 static struct ip_die_entry_attribute num_ips_attr =
909 	__ATTR_RO(num_ips);
910 
911 static struct attribute *ip_die_entry_attrs[] = {
912 	&num_ips_attr.attr,
913 	NULL,
914 };
915 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
916 
917 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
918 
919 static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
920 				      struct attribute *attr,
921 				      char *buf)
922 {
923 	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
924 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
925 
926 	if (!ip_die_entry_attr->show)
927 		return -EIO;
928 
929 	return ip_die_entry_attr->show(ip_die_entry, buf);
930 }
931 
932 static void ip_die_entry_release(struct kobject *kobj)
933 {
934 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
935 
936 	if (!list_empty(&ip_die_entry->ip_kset.list))
937 		DRM_ERROR("ip_die_entry->ip_kset is not empty");
938 	kfree(ip_die_entry);
939 }
940 
941 static const struct sysfs_ops ip_die_entry_sysfs_ops = {
942 	.show = ip_die_entry_attr_show,
943 };
944 
945 static const struct kobj_type ip_die_entry_ktype = {
946 	.release = ip_die_entry_release,
947 	.sysfs_ops = &ip_die_entry_sysfs_ops,
948 	.default_groups = ip_die_entry_groups,
949 };
950 
951 static const struct kobj_type die_kobj_ktype = {
952 	.release = die_kobj_release,
953 	.sysfs_ops = &kobj_sysfs_ops,
954 };
955 
956 static const struct kobj_type ip_discovery_ktype = {
957 	.release = ip_disc_release,
958 	.sysfs_ops = &kobj_sysfs_ops,
959 };
960 
961 struct ip_discovery_top {
962 	struct kobject kobj;    /* ip_discovery/ */
963 	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
964 	struct amdgpu_device *adev;
965 };
966 
967 static void die_kobj_release(struct kobject *kobj)
968 {
969 	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
970 						       struct ip_discovery_top,
971 						       die_kset);
972 	if (!list_empty(&ip_top->die_kset.list))
973 		DRM_ERROR("ip_top->die_kset is not empty");
974 }
975 
976 static void ip_disc_release(struct kobject *kobj)
977 {
978 	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
979 						       kobj);
980 	struct amdgpu_device *adev = ip_top->adev;
981 
982 	adev->ip_top = NULL;
983 	kfree(ip_top);
984 }
985 
986 static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
987 						 uint16_t hw_id, uint8_t inst)
988 {
989 	uint8_t harvest = 0;
990 
991 	/* Until a uniform way is figured out, get the mask based on hwid */
992 	switch (hw_id) {
993 	case VCN_HWID:
994 		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
995 		break;
996 	case DMU_HWID:
997 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
998 			harvest = 0x1;
999 		break;
1000 	case UMC_HWID:
1001 		/* TODO: This needs separate parsing; for now, ignore. */
1002 		break;
1003 	case GC_HWID:
1004 		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
1005 		break;
1006 	case SDMA0_HWID:
1007 		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
1008 		break;
1009 	default:
1010 		break;
1011 	}
1012 
1013 	return harvest;
1014 }
1015 
1016 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
1017 				      struct ip_die_entry *ip_die_entry,
1018 				      const size_t _ip_offset, const int num_ips,
1019 				      bool reg_base_64)
1020 {
1021 	int ii, jj, kk, res;
1022 
1023 	DRM_DEBUG("num_ips:%d", num_ips);
1024 
1025 	/* Find all IPs of a given HW ID, and add their instance to
1026 	 * #die/#hw_id/#instance/<attributes>
1027 	 */
1028 	for (ii = 0; ii < HW_ID_MAX; ii++) {
1029 		struct ip_hw_id *ip_hw_id = NULL;
1030 		size_t ip_offset = _ip_offset;
1031 
1032 		for (jj = 0; jj < num_ips; jj++) {
1033 			struct ip_v4 *ip;
1034 			struct ip_hw_instance *ip_hw_instance;
1035 
1036 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1037 			if (amdgpu_discovery_validate_ip(ip) ||
1038 			    le16_to_cpu(ip->hw_id) != ii)
1039 				goto next_ip;
1040 
1041 			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
1042 
1043 			/* We have a hw_id match; register the hw
1044 			 * block if not yet registered.
1045 			 */
1046 			if (!ip_hw_id) {
1047 				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
1048 				if (!ip_hw_id)
1049 					return -ENOMEM;
1050 				ip_hw_id->hw_id = ii;
1051 
1052 				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
1053 				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
1054 				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
1055 				res = kset_register(&ip_hw_id->hw_id_kset);
1056 				if (res) {
1057 					DRM_ERROR("Couldn't register ip_hw_id kset");
1058 					kfree(ip_hw_id);
1059 					return res;
1060 				}
1061 				if (hw_id_names[ii]) {
1062 					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
1063 								&ip_hw_id->hw_id_kset.kobj,
1064 								hw_id_names[ii]);
1065 					if (res) {
1066 						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
1067 							  hw_id_names[ii],
1068 							  kobject_name(&ip_die_entry->ip_kset.kobj));
1069 					}
1070 				}
1071 			}
1072 
1073 			/* Now register its instance.
1074 			 */
1075 			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
1076 							     base_addr,
1077 							     ip->num_base_address),
1078 						 GFP_KERNEL);
1079 			if (!ip_hw_instance) {
1080 				DRM_ERROR("no memory for ip_hw_instance");
1081 				return -ENOMEM;
1082 			}
1083 			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1084 			ip_hw_instance->num_instance = ip->instance_number;
1085 			ip_hw_instance->major = ip->major;
1086 			ip_hw_instance->minor = ip->minor;
1087 			ip_hw_instance->revision = ip->revision;
1088 			ip_hw_instance->harvest =
1089 				amdgpu_discovery_get_harvest_info(
1090 					adev, ip_hw_instance->hw_id,
1091 					ip_hw_instance->num_instance);
1092 			ip_hw_instance->num_base_addresses = ip->num_base_address;
1093 
1094 			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1095 				if (reg_base_64)
1096 					ip_hw_instance->base_addr[kk] =
1097 						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1098 				else
1099 					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1100 			}
1101 
1102 			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1103 			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1104 			res = kobject_add(&ip_hw_instance->kobj, NULL,
1105 					  "%d", ip_hw_instance->num_instance);
1106 next_ip:
1107 			if (reg_base_64)
1108 				ip_offset += struct_size(ip, base_address_64,
1109 							 ip->num_base_address);
1110 			else
1111 				ip_offset += struct_size(ip, base_address,
1112 							 ip->num_base_address);
1113 		}
1114 	}
1115 
1116 	return 0;
1117 }
1118 
1119 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1120 {
1121 	struct binary_header *bhdr;
1122 	struct ip_discovery_header *ihdr;
1123 	struct die_header *dhdr;
1124 	struct kset *die_kset = &adev->ip_top->die_kset;
1125 	u16 num_dies, die_offset, num_ips;
1126 	size_t ip_offset;
1127 	int ii, res;
1128 
1129 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1130 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1131 					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1132 	num_dies = le16_to_cpu(ihdr->num_dies);
1133 
1134 	DRM_DEBUG("number of dies: %d\n", num_dies);
1135 
1136 	for (ii = 0; ii < num_dies; ii++) {
1137 		struct ip_die_entry *ip_die_entry;
1138 
1139 		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1140 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1141 		num_ips = le16_to_cpu(dhdr->num_ips);
1142 		ip_offset = die_offset + sizeof(*dhdr);
1143 
1144 		/* Add the die to the kset.
1145 		 *
1146 		 * dhdr->die_id == ii, which was checked in
1147 		 * amdgpu_discovery_reg_base_init().
1148 		 */
1149 
1150 		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1151 		if (!ip_die_entry)
1152 			return -ENOMEM;
1153 
1154 		ip_die_entry->num_ips = num_ips;
1155 
1156 		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1157 		ip_die_entry->ip_kset.kobj.kset = die_kset;
1158 		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1159 		res = kset_register(&ip_die_entry->ip_kset);
1160 		if (res) {
1161 			DRM_ERROR("Couldn't register ip_die_entry kset");
1162 			kfree(ip_die_entry);
1163 			return res;
1164 		}
1165 
1166 		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1167 	}
1168 
1169 	return 0;
1170 }
1171 
1172 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1173 {
1174 	struct kset *die_kset;
1175 	int res, ii;
1176 
1177 	if (!adev->mman.discovery_bin)
1178 		return -EINVAL;
1179 
1180 	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1181 	if (!adev->ip_top)
1182 		return -ENOMEM;
1183 
1184 	adev->ip_top->adev = adev;
1185 
1186 	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1187 				   &adev->dev->kobj, "ip_discovery");
1188 	if (res) {
1189 		DRM_ERROR("Couldn't init and add ip_discovery/");
1190 		goto Err;
1191 	}
1192 
1193 	die_kset = &adev->ip_top->die_kset;
1194 	kobject_set_name(&die_kset->kobj, "%s", "die");
1195 	die_kset->kobj.parent = &adev->ip_top->kobj;
1196 	die_kset->kobj.ktype = &die_kobj_ktype;
1197 	res = kset_register(&adev->ip_top->die_kset);
1198 	if (res) {
1199 		DRM_ERROR("Couldn't register die_kset");
1200 		goto Err;
1201 	}
1202 
1203 	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1204 		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1205 	ip_hw_instance_attrs[ii] = NULL;
1206 
1207 	res = amdgpu_discovery_sysfs_recurse(adev);
1208 
1209 	return res;
1210 Err:
1211 	kobject_put(&adev->ip_top->kobj);
1212 	return res;
1213 }
1214 
1215 /* -------------------------------------------------- */
1216 
1217 #define list_to_kobj(el) container_of(el, struct kobject, entry)
1218 
1219 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1220 {
1221 	struct list_head *el, *tmp;
1222 	struct kset *hw_id_kset;
1223 
1224 	hw_id_kset = &ip_hw_id->hw_id_kset;
1225 	spin_lock(&hw_id_kset->list_lock);
1226 	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1227 		list_del_init(el);
1228 		spin_unlock(&hw_id_kset->list_lock);
1229 		/* kobject is embedded in ip_hw_instance */
1230 		kobject_put(list_to_kobj(el));
1231 		spin_lock(&hw_id_kset->list_lock);
1232 	}
1233 	spin_unlock(&hw_id_kset->list_lock);
1234 	kobject_put(&ip_hw_id->hw_id_kset.kobj);
1235 }
1236 
1237 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1238 {
1239 	struct list_head *el, *tmp;
1240 	struct kset *ip_kset;
1241 
1242 	ip_kset = &ip_die_entry->ip_kset;
1243 	spin_lock(&ip_kset->list_lock);
1244 	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1245 		list_del_init(el);
1246 		spin_unlock(&ip_kset->list_lock);
1247 		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1248 		spin_lock(&ip_kset->list_lock);
1249 	}
1250 	spin_unlock(&ip_kset->list_lock);
1251 	kobject_put(&ip_die_entry->ip_kset.kobj);
1252 }
1253 
1254 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1255 {
1256 	struct list_head *el, *tmp;
1257 	struct kset *die_kset;
1258 
1259 	die_kset = &adev->ip_top->die_kset;
1260 	spin_lock(&die_kset->list_lock);
1261 	list_for_each_prev_safe(el, tmp, &die_kset->list) {
1262 		list_del_init(el);
1263 		spin_unlock(&die_kset->list_lock);
1264 		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1265 		spin_lock(&die_kset->list_lock);
1266 	}
1267 	spin_unlock(&die_kset->list_lock);
1268 	kobject_put(&adev->ip_top->die_kset.kobj);
1269 	kobject_put(&adev->ip_top->kobj);
1270 }
1271 
1272 /* ================================================== */
1273 
1274 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1275 {
1276 	uint8_t num_base_address, subrev, variant;
1277 	struct binary_header *bhdr;
1278 	struct ip_discovery_header *ihdr;
1279 	struct die_header *dhdr;
1280 	struct ip_v4 *ip;
1281 	uint16_t die_offset;
1282 	uint16_t ip_offset;
1283 	uint16_t num_dies;
1284 	uint16_t num_ips;
1285 	int hw_ip;
1286 	int i, j, k;
1287 	int r;
1288 
1289 	r = amdgpu_discovery_init(adev);
1290 	if (r) {
1291 		DRM_ERROR("amdgpu_discovery_init failed\n");
1292 		return r;
1293 	}
1294 
1295 	adev->gfx.xcc_mask = 0;
1296 	adev->sdma.sdma_mask = 0;
1297 	adev->vcn.inst_mask = 0;
1298 	adev->jpeg.inst_mask = 0;
1299 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1300 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1301 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1302 	num_dies = le16_to_cpu(ihdr->num_dies);
1303 
1304 	DRM_DEBUG("number of dies: %d\n", num_dies);
1305 
1306 	for (i = 0; i < num_dies; i++) {
1307 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1308 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1309 		num_ips = le16_to_cpu(dhdr->num_ips);
1310 		ip_offset = die_offset + sizeof(*dhdr);
1311 
1312 		if (le16_to_cpu(dhdr->die_id) != i) {
1313 			DRM_ERROR("invalid die id %d, expected %d\n",
1314 					le16_to_cpu(dhdr->die_id), i);
1315 			return -EINVAL;
1316 		}
1317 
1318 		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1319 				le16_to_cpu(dhdr->die_id), num_ips);
1320 
1321 		for (j = 0; j < num_ips; j++) {
1322 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1323 
1324 			if (amdgpu_discovery_validate_ip(ip))
1325 				goto next_ip;
1326 
1327 			num_base_address = ip->num_base_address;
1328 
1329 			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1330 				  hw_id_names[le16_to_cpu(ip->hw_id)],
1331 				  le16_to_cpu(ip->hw_id),
1332 				  ip->instance_number,
1333 				  ip->major, ip->minor,
1334 				  ip->revision);
1335 
1336 			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1337 				/* Bit [5:0]: original revision value
1338 				 * Bit [7:6]: en/decode capability:
1339 				 *     0b00 : VCN function normally
1340 				 *     0b10 : encode is disabled
1341 				 *     0b01 : decode is disabled
1342 				 */
1343 				if (adev->vcn.num_vcn_inst <
1344 				    AMDGPU_MAX_VCN_INSTANCES) {
1345 					adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
1346 						ip->revision & 0xc0;
1347 					adev->vcn.num_vcn_inst++;
1348 					adev->vcn.inst_mask |=
1349 						(1U << ip->instance_number);
1350 					adev->jpeg.inst_mask |=
1351 						(1U << ip->instance_number);
1352 				} else {
1353 					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1354 						adev->vcn.num_vcn_inst + 1,
1355 						AMDGPU_MAX_VCN_INSTANCES);
1356 				}
1357 				ip->revision &= ~0xc0;
1358 			}
1359 			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1360 			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1361 			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1362 			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1363 				if (adev->sdma.num_instances <
1364 				    AMDGPU_MAX_SDMA_INSTANCES) {
1365 					adev->sdma.num_instances++;
1366 					adev->sdma.sdma_mask |=
1367 						(1U << ip->instance_number);
1368 				} else {
1369 					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1370 						adev->sdma.num_instances + 1,
1371 						AMDGPU_MAX_SDMA_INSTANCES);
1372 				}
1373 			}
1374 
1375 			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
1376 				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
1377 					adev->vpe.num_instances++;
1378 				else
1379 					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
1380 						adev->vpe.num_instances + 1,
1381 						AMDGPU_MAX_VPE_INSTANCES);
1382 			}
1383 
1384 			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1385 				adev->gmc.num_umc++;
1386 				adev->umc.node_inst_num++;
1387 			}
1388 
1389 			if (le16_to_cpu(ip->hw_id) == GC_HWID)
1390 				adev->gfx.xcc_mask |=
1391 					(1U << ip->instance_number);
1392 
1393 			for (k = 0; k < num_base_address; k++) {
1394 				/*
1395 				 * convert the endianness of base addresses in place,
1396 				 * so that we don't need to convert them when accessing adev->reg_offset.
1397 				 */
1398 				if (ihdr->base_addr_64_bit)
1399 					/* Truncate the 64bit base address from ip discovery
1400 					 * and only store lower 32bit ip base in reg_offset[].
1401 					 * Bits > 32 follow an ASIC-specific format, thus just
1402 					 * discard them and handle it within specific ASIC.
1403 					 * By this way reg_offset[] and related helpers can
1404 					 * stay unchanged.
1405 					 * The base address is in dwords, thus clear the
1406 					 * highest 2 bits to store.
1407 					 */
1408 					ip->base_address[k] =
1409 						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1410 				else
1411 					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1412 				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1413 			}
1414 
1415 			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1416 				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1417 				    hw_id_map[hw_ip] != 0) {
1418 					DRM_DEBUG("set register base offset for %s\n",
1419 							hw_id_names[le16_to_cpu(ip->hw_id)]);
1420 					adev->reg_offset[hw_ip][ip->instance_number] =
1421 						ip->base_address;
1422 					/* Instance support is somewhat inconsistent.
1423 					 * SDMA is a good example.  Sienna cichlid has 4 total
1424 					 * SDMA instances, each enumerated separately (HWIDs
1425 					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
1426 					 * but they are enumerated as multiple instances of the
1427 					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
1428 					 * example.  On most chips there are multiple instances
1429 					 * with the same HWID.
1430 					 */
1431 
1432 					if (ihdr->version < 3) {
1433 						subrev = 0;
1434 						variant = 0;
1435 					} else {
1436 						subrev = ip->sub_revision;
1437 						variant = ip->variant;
1438 					}
1439 
1440 					adev->ip_versions[hw_ip]
1441 							 [ip->instance_number] =
1442 						IP_VERSION_FULL(ip->major,
1443 								ip->minor,
1444 								ip->revision,
1445 								variant,
1446 								subrev);
1447 				}
1448 			}
1449 
1450 next_ip:
1451 			if (ihdr->base_addr_64_bit)
1452 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1453 			else
1454 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
1455 		}
1456 	}
1457 
1458 	return 0;
1459 }
1460 
1461 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1462 {
1463 	int vcn_harvest_count = 0;
1464 	int umc_harvest_count = 0;
1465 
1466 	/*
1467 	 * The harvest table does not apply to Navi1x and legacy GPUs,
1468 	 * so read harvest bit per IP data structure to set
1469 	 * harvest configuration.
1470 	 */
1471 	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1472 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
1473 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4)) {
1474 		if ((adev->pdev->device == 0x731E &&
1475 			(adev->pdev->revision == 0xC6 ||
1476 			 adev->pdev->revision == 0xC7)) ||
1477 			(adev->pdev->device == 0x7340 &&
1478 			 adev->pdev->revision == 0xC9) ||
1479 			(adev->pdev->device == 0x7360 &&
1480 			 adev->pdev->revision == 0xC7))
1481 			amdgpu_discovery_read_harvest_bit_per_ip(adev,
1482 				&vcn_harvest_count);
1483 	} else {
1484 		amdgpu_discovery_read_from_harvest_table(adev,
1485 							 &vcn_harvest_count,
1486 							 &umc_harvest_count);
1487 	}
1488 
1489 	amdgpu_discovery_harvest_config_quirk(adev);
1490 
1491 	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1492 		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1493 		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1494 	}
1495 
1496 	if (umc_harvest_count < adev->gmc.num_umc) {
1497 		adev->gmc.num_umc -= umc_harvest_count;
1498 	}
1499 }
1500 
1501 union gc_info {
1502 	struct gc_info_v1_0 v1;
1503 	struct gc_info_v1_1 v1_1;
1504 	struct gc_info_v1_2 v1_2;
1505 	struct gc_info_v1_3 v1_3;
1506 	struct gc_info_v2_0 v2;
1507 	struct gc_info_v2_1 v2_1;
1508 };
1509 
1510 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1511 {
1512 	struct binary_header *bhdr;
1513 	union gc_info *gc_info;
1514 	u16 offset;
1515 
1516 	if (!adev->mman.discovery_bin) {
1517 		DRM_ERROR("ip discovery uninitialized\n");
1518 		return -EINVAL;
1519 	}
1520 
1521 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1522 	offset = le16_to_cpu(bhdr->table_list[GC].offset);
1523 
1524 	if (!offset)
1525 		return 0;
1526 
1527 	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1528 
1529 	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1530 	case 1:
1531 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1532 		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1533 						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1534 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1535 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1536 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1537 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1538 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1539 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1540 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1541 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1542 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1543 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1544 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1545 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1546 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1547 			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1548 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1549 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1550 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1551 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1552 			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1553 		}
1554 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1555 			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1556 			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1557 			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1558 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1559 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1560 			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1561 			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1562 			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1563 		}
1564 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
1565 			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
1566 			adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
1567 			adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
1568 			adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
1569 			adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
1570 			adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
1571 			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
1572 			adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
1573 		}
1574 		break;
1575 	case 2:
1576 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1577 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1578 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1579 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1580 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1581 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1582 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1583 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1584 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1585 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1586 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1587 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1588 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1589 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1590 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1591 			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1592 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1593 		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1594 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1595 			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1596 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1597 			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1598 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1599 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1600 			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1601 		}
1602 		break;
1603 	default:
1604 		dev_err(adev->dev,
1605 			"Unhandled GC info table %d.%d\n",
1606 			le16_to_cpu(gc_info->v1.header.version_major),
1607 			le16_to_cpu(gc_info->v1.header.version_minor));
1608 		return -EINVAL;
1609 	}
1610 	return 0;
1611 }
1612 
1613 union mall_info {
1614 	struct mall_info_v1_0 v1;
1615 	struct mall_info_v2_0 v2;
1616 };
1617 
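/* Parse the MALL (last level cache) info table from the discovery
 * binary: v1 sums the per-UMC MALL size over all UMC instances,
 * honouring the "present" and "half use" masks, while v2 simply
 * multiplies the per-UMC size by the number of UMC instances.
 */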
1618 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1619 {
1620 	struct binary_header *bhdr;
1621 	union mall_info *mall_info;
1622 	u32 u, mall_size_per_umc, m_s_present, half_use;
1623 	u64 mall_size;
1624 	u16 offset;
1625 
1626 	if (!adev->mman.discovery_bin) {
1627 		DRM_ERROR("ip discovery uninitialized\n");
1628 		return -EINVAL;
1629 	}
1630 
1631 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1632 	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1633 
1634 	if (!offset)
1635 		return 0;
1636 
1637 	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1638 
1639 	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1640 	case 1:
1641 		mall_size = 0;
1642 		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1643 		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1644 		half_use = le32_to_cpu(mall_info->v1.m_half_use);
1645 		for (u = 0; u < adev->gmc.num_umc; u++) {
1646 			if (m_s_present & (1 << u))
1647 				mall_size += mall_size_per_umc * 2;
1648 			else if (half_use & (1 << u))
1649 				mall_size += mall_size_per_umc / 2;
1650 			else
1651 				mall_size += mall_size_per_umc;
1652 		}
1653 		adev->gmc.mall_size = mall_size;
1654 		adev->gmc.m_half_use = half_use;
1655 		break;
1656 	case 2:
1657 		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1658 		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
1659 		break;
1660 	default:
1661 		dev_err(adev->dev,
1662 			"Unhandled MALL info table %d.%d\n",
1663 			le16_to_cpu(mall_info->v1.header.version_major),
1664 			le16_to_cpu(mall_info->v1.header.version_minor));
1665 		return -EINVAL;
1666 	}
1667 	return 0;
1668 }
1669 
1670 union vcn_info {
1671 	struct vcn_info_v1_0 v1;
1672 };
1673 
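/* Read the VCN info table and record each VCN instance's codec
 * disable mask from its fuse data.
 */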
1674 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1675 {
1676 	struct binary_header *bhdr;
1677 	union vcn_info *vcn_info;
1678 	u16 offset;
1679 	int v;
1680 
1681 	if (!adev->mman.discovery_bin) {
1682 		DRM_ERROR("ip discovery uninitialized\n");
1683 		return -EINVAL;
1684 	}
1685 
1686 	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1687 	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1688 	 * but that may change in the future with new GPUs so keep this
1689 	 * check for defensive purposes.
1690 	 */
1691 	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1692 		dev_err(adev->dev, "invalid vcn instances\n");
1693 		return -EINVAL;
1694 	}
1695 
1696 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1697 	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1698 
1699 	if (!offset)
1700 		return 0;
1701 
1702 	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1703 
1704 	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1705 	case 1:
1706 		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1707 		 * so this won't overflow.
1708 		 */
1709 		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1710 			adev->vcn.inst[v].vcn_codec_disable_mask =
1711 				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1712 		}
1713 		break;
1714 	default:
1715 		dev_err(adev->dev,
1716 			"Unhandled VCN info table %d.%d\n",
1717 			le16_to_cpu(vcn_info->v1.header.version_major),
1718 			le16_to_cpu(vcn_info->v1.header.version_minor));
1719 		return -EINVAL;
1720 	}
1721 	return 0;
1722 }
1723 
1724 union nps_info {
1725 	struct nps_info_v1_0 v1;
1726 };
1727 
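/* Re-read the NPS info table straight from the discovery region near
 * the end of VRAM, bypassing the cached binary, and verify its
 * checksum. This lets callers pick up a table that may have changed
 * since the discovery binary was first cached.
 */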
1728 static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
1729 					     union nps_info *nps_data)
1730 {
1731 	uint64_t vram_size, pos, offset;
1732 	struct nps_info_header *nhdr;
1733 	struct binary_header bhdr;
1734 	uint16_t checksum;
1735 
1736 	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
1737 	pos = vram_size - DISCOVERY_TMR_OFFSET;
1738 	amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);
1739 
1740 	offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
1741 	checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
1742 
1743 	amdgpu_device_vram_access(adev, (pos + offset), nps_data,
1744 				  sizeof(*nps_data), false);
1745 
1746 	nhdr = (struct nps_info_header *)(nps_data);
1747 	if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
1748 					      le32_to_cpu(nhdr->size_bytes),
1749 					      checksum)) {
1750 		dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
1751 		return -EINVAL;
1752 	}
1753 
1754 	return 0;
1755 }
1756 
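/* Fetch the NPS (memory partition) type and per-instance memory
 * ranges from the NPS info table, either from the cached discovery
 * binary or, with @refresh set, re-read from VRAM. On success the
 * caller owns the kvcalloc()'d *ranges array and must free it.
 */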
1757 int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
1758 				  uint32_t *nps_type,
1759 				  struct amdgpu_gmc_memrange **ranges,
1760 				  int *range_cnt, bool refresh)
1761 {
1762 	struct amdgpu_gmc_memrange *mem_ranges;
1763 	struct binary_header *bhdr;
1764 	union nps_info *nps_info;
1765 	union nps_info nps_data;
1766 	u16 offset;
1767 	int i, r;
1768 
1769 	if (!nps_type || !range_cnt || !ranges)
1770 		return -EINVAL;
1771 
1772 	if (refresh) {
1773 		r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
1774 		if (r)
1775 			return r;
1776 		nps_info = &nps_data;
1777 	} else {
1778 		if (!adev->mman.discovery_bin) {
1779 			dev_err(adev->dev,
1780 				"fetch mem range failed, ip discovery uninitialized\n");
1781 			return -EINVAL;
1782 		}
1783 
1784 		bhdr = (struct binary_header *)adev->mman.discovery_bin;
1785 		offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
1786 
1787 		if (!offset)
1788 			return -ENOENT;
1789 
1790 		/* If verification fails, return as if NPS table doesn't exist */
1791 		if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
1792 			return -ENOENT;
1793 
1794 		nps_info =
1795 			(union nps_info *)(adev->mman.discovery_bin + offset);
1796 	}
1797 
1798 	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
1799 	case 1:
1800 		mem_ranges = kvcalloc(nps_info->v1.count,
1801 				      sizeof(*mem_ranges),
1802 				      GFP_KERNEL);
1803 		if (!mem_ranges)
1804 			return -ENOMEM;
1805 		*nps_type = nps_info->v1.nps_type;
1806 		*range_cnt = nps_info->v1.count;
1807 		for (i = 0; i < *range_cnt; i++) {
1808 			mem_ranges[i].base_address =
1809 				nps_info->v1.instance_info[i].base_address;
1810 			mem_ranges[i].limit_address =
1811 				nps_info->v1.instance_info[i].limit_address;
1812 			mem_ranges[i].nid_mask = -1;
1813 			mem_ranges[i].flags = 0;
1814 		}
1815 		*ranges = mem_ranges;
1816 		break;
1817 	default:
1818 		dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
1819 			le16_to_cpu(nps_info->v1.header.version_major),
1820 			le16_to_cpu(nps_info->v1.header.version_minor));
1821 		return -EINVAL;
1822 	}
1823 
1824 	return 0;
1825 }
1826 
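/* Map the discovered GC IP version to the matching SOC "common" IP
 * block (vega10/nv/soc21/soc24).
 */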
1827 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1828 {
1829 	/* what IP to use for this? */
1830 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1831 	case IP_VERSION(9, 0, 1):
1832 	case IP_VERSION(9, 1, 0):
1833 	case IP_VERSION(9, 2, 1):
1834 	case IP_VERSION(9, 2, 2):
1835 	case IP_VERSION(9, 3, 0):
1836 	case IP_VERSION(9, 4, 0):
1837 	case IP_VERSION(9, 4, 1):
1838 	case IP_VERSION(9, 4, 2):
1839 	case IP_VERSION(9, 4, 3):
1840 	case IP_VERSION(9, 4, 4):
1841 	case IP_VERSION(9, 5, 0):
1842 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1843 		break;
1844 	case IP_VERSION(10, 1, 10):
1845 	case IP_VERSION(10, 1, 1):
1846 	case IP_VERSION(10, 1, 2):
1847 	case IP_VERSION(10, 1, 3):
1848 	case IP_VERSION(10, 1, 4):
1849 	case IP_VERSION(10, 3, 0):
1850 	case IP_VERSION(10, 3, 1):
1851 	case IP_VERSION(10, 3, 2):
1852 	case IP_VERSION(10, 3, 3):
1853 	case IP_VERSION(10, 3, 4):
1854 	case IP_VERSION(10, 3, 5):
1855 	case IP_VERSION(10, 3, 6):
1856 	case IP_VERSION(10, 3, 7):
1857 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1858 		break;
1859 	case IP_VERSION(11, 0, 0):
1860 	case IP_VERSION(11, 0, 1):
1861 	case IP_VERSION(11, 0, 2):
1862 	case IP_VERSION(11, 0, 3):
1863 	case IP_VERSION(11, 0, 4):
1864 	case IP_VERSION(11, 5, 0):
1865 	case IP_VERSION(11, 5, 1):
1866 	case IP_VERSION(11, 5, 2):
1867 		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1868 		break;
1869 	case IP_VERSION(12, 0, 0):
1870 	case IP_VERSION(12, 0, 1):
1871 		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
1872 		break;
1873 	default:
1874 		dev_err(adev->dev,
1875 			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1876 			amdgpu_ip_version(adev, GC_HWIP, 0));
1877 		return -EINVAL;
1878 	}
1879 	return 0;
1880 }
1881 
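/* Map the GC IP version to the matching GMC (memory controller) block. */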
1882 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1883 {
1884 	/* use GC or MMHUB IP version */
1885 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1886 	case IP_VERSION(9, 0, 1):
1887 	case IP_VERSION(9, 1, 0):
1888 	case IP_VERSION(9, 2, 1):
1889 	case IP_VERSION(9, 2, 2):
1890 	case IP_VERSION(9, 3, 0):
1891 	case IP_VERSION(9, 4, 0):
1892 	case IP_VERSION(9, 4, 1):
1893 	case IP_VERSION(9, 4, 2):
1894 	case IP_VERSION(9, 4, 3):
1895 	case IP_VERSION(9, 4, 4):
1896 	case IP_VERSION(9, 5, 0):
1897 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1898 		break;
1899 	case IP_VERSION(10, 1, 10):
1900 	case IP_VERSION(10, 1, 1):
1901 	case IP_VERSION(10, 1, 2):
1902 	case IP_VERSION(10, 1, 3):
1903 	case IP_VERSION(10, 1, 4):
1904 	case IP_VERSION(10, 3, 0):
1905 	case IP_VERSION(10, 3, 1):
1906 	case IP_VERSION(10, 3, 2):
1907 	case IP_VERSION(10, 3, 3):
1908 	case IP_VERSION(10, 3, 4):
1909 	case IP_VERSION(10, 3, 5):
1910 	case IP_VERSION(10, 3, 6):
1911 	case IP_VERSION(10, 3, 7):
1912 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1913 		break;
1914 	case IP_VERSION(11, 0, 0):
1915 	case IP_VERSION(11, 0, 1):
1916 	case IP_VERSION(11, 0, 2):
1917 	case IP_VERSION(11, 0, 3):
1918 	case IP_VERSION(11, 0, 4):
1919 	case IP_VERSION(11, 5, 0):
1920 	case IP_VERSION(11, 5, 1):
1921 	case IP_VERSION(11, 5, 2):
1922 		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1923 		break;
1924 	case IP_VERSION(12, 0, 0):
1925 	case IP_VERSION(12, 0, 1):
1926 		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
1927 		break;
1928 	default:
1929 		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1930 			amdgpu_ip_version(adev, GC_HWIP, 0));
1931 		return -EINVAL;
1932 	}
1933 	return 0;
1934 }
1935 
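/* Map the OSSSYS IP version to the matching interrupt handler block. */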
1936 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1937 {
1938 	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
1939 	case IP_VERSION(4, 0, 0):
1940 	case IP_VERSION(4, 0, 1):
1941 	case IP_VERSION(4, 1, 0):
1942 	case IP_VERSION(4, 1, 1):
1943 	case IP_VERSION(4, 3, 0):
1944 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1945 		break;
1946 	case IP_VERSION(4, 2, 0):
1947 	case IP_VERSION(4, 2, 1):
1948 	case IP_VERSION(4, 4, 0):
1949 	case IP_VERSION(4, 4, 2):
1950 	case IP_VERSION(4, 4, 5):
1951 		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1952 		break;
1953 	case IP_VERSION(5, 0, 0):
1954 	case IP_VERSION(5, 0, 1):
1955 	case IP_VERSION(5, 0, 2):
1956 	case IP_VERSION(5, 0, 3):
1957 	case IP_VERSION(5, 2, 0):
1958 	case IP_VERSION(5, 2, 1):
1959 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1960 		break;
1961 	case IP_VERSION(6, 0, 0):
1962 	case IP_VERSION(6, 0, 1):
1963 	case IP_VERSION(6, 0, 2):
1964 		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1965 		break;
1966 	case IP_VERSION(6, 1, 0):
1967 		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1968 		break;
1969 	case IP_VERSION(7, 0, 0):
1970 		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
1971 		break;
1972 	default:
1973 		dev_err(adev->dev,
1974 			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1975 			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
1976 		return -EINVAL;
1977 	}
1978 	return 0;
1979 }
1980 
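/* Map the MP0 IP version to the matching PSP (security processor) block. */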
1981 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1982 {
1983 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1984 	case IP_VERSION(9, 0, 0):
1985 		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1986 		break;
1987 	case IP_VERSION(10, 0, 0):
1988 	case IP_VERSION(10, 0, 1):
1989 		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1990 		break;
1991 	case IP_VERSION(11, 0, 0):
1992 	case IP_VERSION(11, 0, 2):
1993 	case IP_VERSION(11, 0, 4):
1994 	case IP_VERSION(11, 0, 5):
1995 	case IP_VERSION(11, 0, 9):
1996 	case IP_VERSION(11, 0, 7):
1997 	case IP_VERSION(11, 0, 11):
1998 	case IP_VERSION(11, 0, 12):
1999 	case IP_VERSION(11, 0, 13):
2000 	case IP_VERSION(11, 5, 0):
2001 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
2002 		break;
2003 	case IP_VERSION(11, 0, 8):
2004 		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
2005 		break;
2006 	case IP_VERSION(11, 0, 3):
2007 	case IP_VERSION(12, 0, 1):
2008 		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
2009 		break;
2010 	case IP_VERSION(13, 0, 0):
2011 	case IP_VERSION(13, 0, 1):
2012 	case IP_VERSION(13, 0, 2):
2013 	case IP_VERSION(13, 0, 3):
2014 	case IP_VERSION(13, 0, 5):
2015 	case IP_VERSION(13, 0, 6):
2016 	case IP_VERSION(13, 0, 7):
2017 	case IP_VERSION(13, 0, 8):
2018 	case IP_VERSION(13, 0, 10):
2019 	case IP_VERSION(13, 0, 11):
2020 	case IP_VERSION(13, 0, 12):
2021 	case IP_VERSION(13, 0, 14):
2022 	case IP_VERSION(14, 0, 0):
2023 	case IP_VERSION(14, 0, 1):
2024 	case IP_VERSION(14, 0, 4):
2025 		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
2026 		break;
2027 	case IP_VERSION(13, 0, 4):
2028 		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
2029 		break;
2030 	case IP_VERSION(14, 0, 2):
2031 	case IP_VERSION(14, 0, 3):
2032 		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
2033 		break;
2034 	default:
2035 		dev_err(adev->dev,
2036 			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
2037 			amdgpu_ip_version(adev, MP0_HWIP, 0));
2038 		return -EINVAL;
2039 	}
2040 	return 0;
2041 }
2042 
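/* Map the MP1 IP version to the matching SMU/powerplay block. */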
2043 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
2044 {
2045 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2046 	case IP_VERSION(9, 0, 0):
2047 	case IP_VERSION(10, 0, 0):
2048 	case IP_VERSION(10, 0, 1):
2049 	case IP_VERSION(11, 0, 2):
2050 		if (adev->asic_type == CHIP_ARCTURUS)
2051 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2052 		else
2053 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2054 		break;
2055 	case IP_VERSION(11, 0, 0):
2056 	case IP_VERSION(11, 0, 5):
2057 	case IP_VERSION(11, 0, 9):
2058 	case IP_VERSION(11, 0, 7):
2059 	case IP_VERSION(11, 0, 8):
2060 	case IP_VERSION(11, 0, 11):
2061 	case IP_VERSION(11, 0, 12):
2062 	case IP_VERSION(11, 0, 13):
2063 	case IP_VERSION(11, 5, 0):
2064 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2065 		break;
2066 	case IP_VERSION(12, 0, 0):
2067 	case IP_VERSION(12, 0, 1):
2068 		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
2069 		break;
2070 	case IP_VERSION(13, 0, 0):
2071 	case IP_VERSION(13, 0, 1):
2072 	case IP_VERSION(13, 0, 2):
2073 	case IP_VERSION(13, 0, 3):
2074 	case IP_VERSION(13, 0, 4):
2075 	case IP_VERSION(13, 0, 5):
2076 	case IP_VERSION(13, 0, 6):
2077 	case IP_VERSION(13, 0, 7):
2078 	case IP_VERSION(13, 0, 8):
2079 	case IP_VERSION(13, 0, 10):
2080 	case IP_VERSION(13, 0, 11):
2081 	case IP_VERSION(13, 0, 14):
2082 		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
2083 		break;
2084 	case IP_VERSION(14, 0, 0):
2085 	case IP_VERSION(14, 0, 1):
2086 	case IP_VERSION(14, 0, 2):
2087 	case IP_VERSION(14, 0, 3):
2088 	case IP_VERSION(14, 0, 4):
2089 		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
2090 		break;
2091 	default:
2092 		dev_err(adev->dev,
2093 			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
2094 			amdgpu_ip_version(adev, MP1_HWIP, 0));
2095 		return -EINVAL;
2096 	}
2097 	return 0;
2098 }
2099 
2100 #if defined(CONFIG_DRM_AMD_DC)
2101 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
2102 {
2103 	amdgpu_device_set_sriov_virtual_display(adev);
2104 	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2105 }
2106 #endif
2107 
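/* Register a display IP block: VKMS when virtual display is enabled,
 * otherwise DC (or the SR-IOV virtual display path) selected by the
 * DCE or DCI IP version.
 */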
2108 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
2109 {
2110 	if (adev->enable_virtual_display) {
2111 		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2112 		return 0;
2113 	}
2114 
2115 	if (!amdgpu_device_has_dc_support(adev))
2116 		return 0;
2117 
2118 #if defined(CONFIG_DRM_AMD_DC)
2119 	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2120 		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2121 		case IP_VERSION(1, 0, 0):
2122 		case IP_VERSION(1, 0, 1):
2123 		case IP_VERSION(2, 0, 2):
2124 		case IP_VERSION(2, 0, 0):
2125 		case IP_VERSION(2, 0, 3):
2126 		case IP_VERSION(2, 1, 0):
2127 		case IP_VERSION(3, 0, 0):
2128 		case IP_VERSION(3, 0, 2):
2129 		case IP_VERSION(3, 0, 3):
2130 		case IP_VERSION(3, 0, 1):
2131 		case IP_VERSION(3, 1, 2):
2132 		case IP_VERSION(3, 1, 3):
2133 		case IP_VERSION(3, 1, 4):
2134 		case IP_VERSION(3, 1, 5):
2135 		case IP_VERSION(3, 1, 6):
2136 		case IP_VERSION(3, 2, 0):
2137 		case IP_VERSION(3, 2, 1):
2138 		case IP_VERSION(3, 5, 0):
2139 		case IP_VERSION(3, 5, 1):
2140 		case IP_VERSION(4, 1, 0):
2141 			/* TODO: Fix IP version. DC code expects version 4.0.1 */
2142 			if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
2143 				adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);
2144 
2145 			if (amdgpu_sriov_vf(adev))
2146 				amdgpu_discovery_set_sriov_display(adev);
2147 			else
2148 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2149 			break;
2150 		default:
2151 			dev_err(adev->dev,
2152 				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
2153 				amdgpu_ip_version(adev, DCE_HWIP, 0));
2154 			return -EINVAL;
2155 		}
2156 	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2157 		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2158 		case IP_VERSION(12, 0, 0):
2159 		case IP_VERSION(12, 0, 1):
2160 		case IP_VERSION(12, 1, 0):
2161 			if (amdgpu_sriov_vf(adev))
2162 				amdgpu_discovery_set_sriov_display(adev);
2163 			else
2164 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2165 			break;
2166 		default:
2167 			dev_err(adev->dev,
2168 				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
2169 				amdgpu_ip_version(adev, DCI_HWIP, 0));
2170 			return -EINVAL;
2171 		}
2172 	}
2173 #endif
2174 	return 0;
2175 }
2176 
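/* Map the GC IP version to the matching GFX block. */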
2177 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
2178 {
2179 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2180 	case IP_VERSION(9, 0, 1):
2181 	case IP_VERSION(9, 1, 0):
2182 	case IP_VERSION(9, 2, 1):
2183 	case IP_VERSION(9, 2, 2):
2184 	case IP_VERSION(9, 3, 0):
2185 	case IP_VERSION(9, 4, 0):
2186 	case IP_VERSION(9, 4, 1):
2187 	case IP_VERSION(9, 4, 2):
2188 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
2189 		break;
2190 	case IP_VERSION(9, 4, 3):
2191 	case IP_VERSION(9, 4, 4):
2192 	case IP_VERSION(9, 5, 0):
2193 		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
2194 		break;
2195 	case IP_VERSION(10, 1, 10):
2196 	case IP_VERSION(10, 1, 2):
2197 	case IP_VERSION(10, 1, 1):
2198 	case IP_VERSION(10, 1, 3):
2199 	case IP_VERSION(10, 1, 4):
2200 	case IP_VERSION(10, 3, 0):
2201 	case IP_VERSION(10, 3, 2):
2202 	case IP_VERSION(10, 3, 1):
2203 	case IP_VERSION(10, 3, 4):
2204 	case IP_VERSION(10, 3, 5):
2205 	case IP_VERSION(10, 3, 6):
2206 	case IP_VERSION(10, 3, 3):
2207 	case IP_VERSION(10, 3, 7):
2208 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
2209 		break;
2210 	case IP_VERSION(11, 0, 0):
2211 	case IP_VERSION(11, 0, 1):
2212 	case IP_VERSION(11, 0, 2):
2213 	case IP_VERSION(11, 0, 3):
2214 	case IP_VERSION(11, 0, 4):
2215 	case IP_VERSION(11, 5, 0):
2216 	case IP_VERSION(11, 5, 1):
2217 	case IP_VERSION(11, 5, 2):
2218 		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
2219 		break;
2220 	case IP_VERSION(12, 0, 0):
2221 	case IP_VERSION(12, 0, 1):
2222 		amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
2223 		break;
2224 	default:
2225 		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
2226 			amdgpu_ip_version(adev, GC_HWIP, 0));
2227 		return -EINVAL;
2228 	}
2229 	return 0;
2230 }
2231 
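/* Map the SDMA0 IP version to the matching SDMA block. */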
2232 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
2233 {
2234 	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
2235 	case IP_VERSION(4, 0, 0):
2236 	case IP_VERSION(4, 0, 1):
2237 	case IP_VERSION(4, 1, 0):
2238 	case IP_VERSION(4, 1, 1):
2239 	case IP_VERSION(4, 1, 2):
2240 	case IP_VERSION(4, 2, 0):
2241 	case IP_VERSION(4, 2, 2):
2242 	case IP_VERSION(4, 4, 0):
2243 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
2244 		break;
2245 	case IP_VERSION(4, 4, 2):
2246 	case IP_VERSION(4, 4, 5):
2247 	case IP_VERSION(4, 4, 4):
2248 		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
2249 		break;
2250 	case IP_VERSION(5, 0, 0):
2251 	case IP_VERSION(5, 0, 1):
2252 	case IP_VERSION(5, 0, 2):
2253 	case IP_VERSION(5, 0, 5):
2254 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2255 		break;
2256 	case IP_VERSION(5, 2, 0):
2257 	case IP_VERSION(5, 2, 2):
2258 	case IP_VERSION(5, 2, 4):
2259 	case IP_VERSION(5, 2, 5):
2260 	case IP_VERSION(5, 2, 6):
2261 	case IP_VERSION(5, 2, 3):
2262 	case IP_VERSION(5, 2, 1):
2263 	case IP_VERSION(5, 2, 7):
2264 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2265 		break;
2266 	case IP_VERSION(6, 0, 0):
2267 	case IP_VERSION(6, 0, 1):
2268 	case IP_VERSION(6, 0, 2):
2269 	case IP_VERSION(6, 0, 3):
2270 	case IP_VERSION(6, 1, 0):
2271 	case IP_VERSION(6, 1, 1):
2272 	case IP_VERSION(6, 1, 2):
2273 		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2274 		break;
2275 	case IP_VERSION(7, 0, 0):
2276 	case IP_VERSION(7, 0, 1):
2277 		amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
2278 		break;
2279 	default:
2280 		dev_err(adev->dev,
2281 			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2282 			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2283 		return -EINVAL;
2284 	}
2285 	return 0;
2286 }
2287 
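/* Register multimedia blocks: UVD/VCE for ASICs that expose a VCE IP,
 * otherwise the VCN block (plus JPEG where applicable) for the
 * discovered UVD/VCN version.
 */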
2288 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2289 {
2290 	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2291 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2292 		case IP_VERSION(7, 0, 0):
2293 		case IP_VERSION(7, 2, 0):
2294 			/* UVD is not supported on vega20 SR-IOV */
2295 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2296 				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2297 			break;
2298 		default:
2299 			dev_err(adev->dev,
2300 				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2301 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2302 			return -EINVAL;
2303 		}
2304 		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2305 		case IP_VERSION(4, 0, 0):
2306 		case IP_VERSION(4, 1, 0):
2307 			/* VCE is not supported on vega20 SR-IOV */
2308 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2309 				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2310 			break;
2311 		default:
2312 			dev_err(adev->dev,
2313 				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2314 				amdgpu_ip_version(adev, VCE_HWIP, 0));
2315 			return -EINVAL;
2316 		}
2317 	} else {
2318 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2319 		case IP_VERSION(1, 0, 0):
2320 		case IP_VERSION(1, 0, 1):
2321 			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2322 			break;
2323 		case IP_VERSION(2, 0, 0):
2324 		case IP_VERSION(2, 0, 2):
2325 		case IP_VERSION(2, 2, 0):
2326 			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2327 			if (!amdgpu_sriov_vf(adev))
2328 				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2329 			break;
2330 		case IP_VERSION(2, 0, 3):
2331 			break;
2332 		case IP_VERSION(2, 5, 0):
2333 			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2334 			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2335 			break;
2336 		case IP_VERSION(2, 6, 0):
2337 			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2338 			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2339 			break;
2340 		case IP_VERSION(3, 0, 0):
2341 		case IP_VERSION(3, 0, 16):
2342 		case IP_VERSION(3, 1, 1):
2343 		case IP_VERSION(3, 1, 2):
2344 		case IP_VERSION(3, 0, 2):
2345 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2346 			if (!amdgpu_sriov_vf(adev))
2347 				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2348 			break;
2349 		case IP_VERSION(3, 0, 33):
2350 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2351 			break;
2352 		case IP_VERSION(4, 0, 0):
2353 		case IP_VERSION(4, 0, 2):
2354 		case IP_VERSION(4, 0, 4):
2355 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2356 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2357 			break;
2358 		case IP_VERSION(4, 0, 3):
2359 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2360 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2361 			break;
2362 		case IP_VERSION(4, 0, 5):
2363 		case IP_VERSION(4, 0, 6):
2364 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2365 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2366 			break;
2367 		case IP_VERSION(5, 0, 0):
2368 			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2369 			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
2370 			break;
2371 		case IP_VERSION(5, 0, 1):
2372 			amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
2373 			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
2374 			break;
2375 		default:
2376 			dev_err(adev->dev,
2377 				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2378 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2379 			return -EINVAL;
2380 		}
2381 	}
2382 	return 0;
2383 }
2384 
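/* Register the MES scheduler for GFX11/GFX12 parts and enable MES and
 * the MES KIQ (plus unified MES on GFX12 when requested via the
 * amdgpu_uni_mes module parameter).
 */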
2385 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2386 {
2387 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2388 	case IP_VERSION(11, 0, 0):
2389 	case IP_VERSION(11, 0, 1):
2390 	case IP_VERSION(11, 0, 2):
2391 	case IP_VERSION(11, 0, 3):
2392 	case IP_VERSION(11, 0, 4):
2393 	case IP_VERSION(11, 5, 0):
2394 	case IP_VERSION(11, 5, 1):
2395 	case IP_VERSION(11, 5, 2):
2396 		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2397 		adev->enable_mes = true;
2398 		adev->enable_mes_kiq = true;
2399 		break;
2400 	case IP_VERSION(12, 0, 0):
2401 	case IP_VERSION(12, 0, 1):
2402 		amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
2403 		adev->enable_mes = true;
2404 		adev->enable_mes_kiq = true;
2405 		if (amdgpu_uni_mes)
2406 			adev->enable_uni_mes = true;
2407 		break;
2408 	default:
2409 		break;
2410 	}
2411 	return 0;
2412 }
2413 
2414 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2415 {
2416 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2417 	case IP_VERSION(9, 4, 3):
2418 	case IP_VERSION(9, 4, 4):
2419 	case IP_VERSION(9, 5, 0):
2420 		aqua_vanjaram_init_soc_config(adev);
2421 		break;
2422 	default:
2423 		break;
2424 	}
2425 }
2426 
2427 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2428 {
2429 	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2430 	case IP_VERSION(6, 1, 0):
2431 	case IP_VERSION(6, 1, 1):
2432 	case IP_VERSION(6, 1, 3):
2433 		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2434 		break;
2435 	default:
2436 		break;
2437 	}
2438 
2439 	return 0;
2440 }
2441 
2442 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2443 {
2444 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2445 	case IP_VERSION(4, 0, 5):
2446 	case IP_VERSION(4, 0, 6):
2447 		if (amdgpu_umsch_mm & 0x1) {
2448 			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2449 			adev->enable_umsch_mm = true;
2450 		}
2451 		break;
2452 	default:
2453 		break;
2454 	}
2455 
2456 	return 0;
2457 }
2458 
2459 static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
2460 {
2461 #if defined(CONFIG_DRM_AMD_ISP)
2462 	switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
2463 	case IP_VERSION(4, 1, 0):
2464 		amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
2465 		break;
2466 	case IP_VERSION(4, 1, 1):
2467 		amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
2468 		break;
2469 	default:
2470 		break;
2471 	}
2472 #endif
2473 
2474 	return 0;
2475 }
2476 
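/* Top level entry point: establish IP versions (hardcoded tables for
 * pre-discovery ASICs, otherwise parsed from the discovery binary),
 * then derive the chip family, APU/XGMI flags, per-IP callbacks and
 * the full list of IP blocks for this device.
 */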
2477 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2478 {
2479 	int r;
2480 
2481 	switch (adev->asic_type) {
2482 	case CHIP_VEGA10:
2483 		vega10_reg_base_init(adev);
2484 		adev->sdma.num_instances = 2;
2485 		adev->gmc.num_umc = 4;
2486 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2487 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2488 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2489 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2490 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2491 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2492 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2493 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2494 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2495 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2496 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2497 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2498 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2499 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2500 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2501 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2502 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2503 		break;
2504 	case CHIP_VEGA12:
2505 		vega10_reg_base_init(adev);
2506 		adev->sdma.num_instances = 2;
2507 		adev->gmc.num_umc = 4;
2508 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2509 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2510 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2511 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2512 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2513 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2514 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2515 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2516 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2517 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2518 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2519 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2520 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2521 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2522 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2523 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2524 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2525 		break;
2526 	case CHIP_RAVEN:
2527 		vega10_reg_base_init(adev);
2528 		adev->sdma.num_instances = 1;
2529 		adev->vcn.num_vcn_inst = 1;
2530 		adev->gmc.num_umc = 2;
2531 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2532 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2533 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2534 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2535 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2536 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2537 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2538 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2539 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2540 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2541 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2542 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2543 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2544 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2545 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2546 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2547 			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
2548 		} else {
2549 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2550 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2551 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2552 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2553 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2554 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2555 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2556 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2557 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2558 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2559 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2560 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2561 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2562 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2563 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2564 			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
2565 		}
2566 		break;
2567 	case CHIP_VEGA20:
2568 		vega20_reg_base_init(adev);
2569 		adev->sdma.num_instances = 2;
2570 		adev->gmc.num_umc = 8;
2571 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2572 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2573 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2574 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2575 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2576 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2577 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2578 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2579 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2580 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2581 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2582 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2583 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2584 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2585 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2586 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2587 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2588 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2589 		break;
2590 	case CHIP_ARCTURUS:
2591 		arct_reg_base_init(adev);
2592 		adev->sdma.num_instances = 8;
2593 		adev->vcn.num_vcn_inst = 2;
2594 		adev->gmc.num_umc = 8;
2595 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2596 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2597 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2598 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2599 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2600 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2601 		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2602 		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2603 		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2604 		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2605 		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2606 		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2607 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2608 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2609 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2610 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2611 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2612 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2613 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2614 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2615 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2616 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2617 		break;
2618 	case CHIP_ALDEBARAN:
2619 		aldebaran_reg_base_init(adev);
2620 		adev->sdma.num_instances = 5;
2621 		adev->vcn.num_vcn_inst = 2;
2622 		adev->gmc.num_umc = 4;
2623 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2624 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2625 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2626 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2627 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2628 		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2629 		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2630 		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2631 		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2632 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2633 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2634 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2635 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2636 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2637 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2638 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2639 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2640 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2641 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2642 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2643 		break;
2644 	default:
2645 		r = amdgpu_discovery_reg_base_init(adev);
2646 		if (r)
2647 			return -EINVAL;
2648 
2649 		amdgpu_discovery_harvest_ip(adev);
2650 		amdgpu_discovery_get_gfx_info(adev);
2651 		amdgpu_discovery_get_mall_info(adev);
2652 		amdgpu_discovery_get_vcn_info(adev);
2653 		break;
2654 	}
2655 
2656 	amdgpu_discovery_init_soc_config(adev);
2657 	amdgpu_discovery_sysfs_init(adev);
2658 
2659 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2660 	case IP_VERSION(9, 0, 1):
2661 	case IP_VERSION(9, 2, 1):
2662 	case IP_VERSION(9, 4, 0):
2663 	case IP_VERSION(9, 4, 1):
2664 	case IP_VERSION(9, 4, 2):
2665 	case IP_VERSION(9, 4, 3):
2666 	case IP_VERSION(9, 4, 4):
2667 	case IP_VERSION(9, 5, 0):
2668 		adev->family = AMDGPU_FAMILY_AI;
2669 		break;
2670 	case IP_VERSION(9, 1, 0):
2671 	case IP_VERSION(9, 2, 2):
2672 	case IP_VERSION(9, 3, 0):
2673 		adev->family = AMDGPU_FAMILY_RV;
2674 		break;
2675 	case IP_VERSION(10, 1, 10):
2676 	case IP_VERSION(10, 1, 1):
2677 	case IP_VERSION(10, 1, 2):
2678 	case IP_VERSION(10, 1, 3):
2679 	case IP_VERSION(10, 1, 4):
2680 	case IP_VERSION(10, 3, 0):
2681 	case IP_VERSION(10, 3, 2):
2682 	case IP_VERSION(10, 3, 4):
2683 	case IP_VERSION(10, 3, 5):
2684 		adev->family = AMDGPU_FAMILY_NV;
2685 		break;
2686 	case IP_VERSION(10, 3, 1):
2687 		adev->family = AMDGPU_FAMILY_VGH;
2688 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2689 		break;
2690 	case IP_VERSION(10, 3, 3):
2691 		adev->family = AMDGPU_FAMILY_YC;
2692 		break;
2693 	case IP_VERSION(10, 3, 6):
2694 		adev->family = AMDGPU_FAMILY_GC_10_3_6;
2695 		break;
2696 	case IP_VERSION(10, 3, 7):
2697 		adev->family = AMDGPU_FAMILY_GC_10_3_7;
2698 		break;
2699 	case IP_VERSION(11, 0, 0):
2700 	case IP_VERSION(11, 0, 2):
2701 	case IP_VERSION(11, 0, 3):
2702 		adev->family = AMDGPU_FAMILY_GC_11_0_0;
2703 		break;
2704 	case IP_VERSION(11, 0, 1):
2705 	case IP_VERSION(11, 0, 4):
2706 		adev->family = AMDGPU_FAMILY_GC_11_0_1;
2707 		break;
2708 	case IP_VERSION(11, 5, 0):
2709 	case IP_VERSION(11, 5, 1):
2710 	case IP_VERSION(11, 5, 2):
2711 		adev->family = AMDGPU_FAMILY_GC_11_5_0;
2712 		break;
2713 	case IP_VERSION(12, 0, 0):
2714 	case IP_VERSION(12, 0, 1):
2715 		adev->family = AMDGPU_FAMILY_GC_12_0_0;
2716 		break;
2717 	default:
2718 		return -EINVAL;
2719 	}
2720 
2721 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2722 	case IP_VERSION(9, 1, 0):
2723 	case IP_VERSION(9, 2, 2):
2724 	case IP_VERSION(9, 3, 0):
2725 	case IP_VERSION(10, 1, 3):
2726 	case IP_VERSION(10, 1, 4):
2727 	case IP_VERSION(10, 3, 1):
2728 	case IP_VERSION(10, 3, 3):
2729 	case IP_VERSION(10, 3, 6):
2730 	case IP_VERSION(10, 3, 7):
2731 	case IP_VERSION(11, 0, 1):
2732 	case IP_VERSION(11, 0, 4):
2733 	case IP_VERSION(11, 5, 0):
2734 	case IP_VERSION(11, 5, 1):
2735 	case IP_VERSION(11, 5, 2):
2736 		adev->flags |= AMD_IS_APU;
2737 		break;
2738 	default:
2739 		break;
2740 	}
2741 
2742 	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
2743 		adev->gmc.xgmi.supported = true;
2744 
2745 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2746 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
2747 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
2748 
2749 	/* set the NBIO callbacks for the discovered NBIO IP version */
2750 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2751 	case IP_VERSION(6, 1, 0):
2752 	case IP_VERSION(6, 2, 0):
2753 		adev->nbio.funcs = &nbio_v6_1_funcs;
2754 		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2755 		break;
2756 	case IP_VERSION(7, 0, 0):
2757 	case IP_VERSION(7, 0, 1):
2758 	case IP_VERSION(2, 5, 0):
2759 		adev->nbio.funcs = &nbio_v7_0_funcs;
2760 		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2761 		break;
2762 	case IP_VERSION(7, 4, 0):
2763 	case IP_VERSION(7, 4, 1):
2764 	case IP_VERSION(7, 4, 4):
2765 		adev->nbio.funcs = &nbio_v7_4_funcs;
2766 		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2767 		break;
2768 	case IP_VERSION(7, 9, 0):
2769 		adev->nbio.funcs = &nbio_v7_9_funcs;
2770 		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2771 		break;
2772 	case IP_VERSION(7, 11, 0):
2773 	case IP_VERSION(7, 11, 1):
2774 	case IP_VERSION(7, 11, 3):
2775 		adev->nbio.funcs = &nbio_v7_11_funcs;
2776 		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2777 		break;
2778 	case IP_VERSION(7, 2, 0):
2779 	case IP_VERSION(7, 2, 1):
2780 	case IP_VERSION(7, 3, 0):
2781 	case IP_VERSION(7, 5, 0):
2782 	case IP_VERSION(7, 5, 1):
2783 		adev->nbio.funcs = &nbio_v7_2_funcs;
2784 		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2785 		break;
2786 	case IP_VERSION(2, 1, 1):
2787 	case IP_VERSION(2, 3, 0):
2788 	case IP_VERSION(2, 3, 1):
2789 	case IP_VERSION(2, 3, 2):
2790 	case IP_VERSION(3, 3, 0):
2791 	case IP_VERSION(3, 3, 1):
2792 	case IP_VERSION(3, 3, 2):
2793 	case IP_VERSION(3, 3, 3):
2794 		adev->nbio.funcs = &nbio_v2_3_funcs;
2795 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2796 		break;
2797 	case IP_VERSION(4, 3, 0):
2798 	case IP_VERSION(4, 3, 1):
2799 		if (amdgpu_sriov_vf(adev))
2800 			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2801 		else
2802 			adev->nbio.funcs = &nbio_v4_3_funcs;
2803 		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2804 		break;
2805 	case IP_VERSION(7, 7, 0):
2806 	case IP_VERSION(7, 7, 1):
2807 		adev->nbio.funcs = &nbio_v7_7_funcs;
2808 		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2809 		break;
2810 	case IP_VERSION(6, 3, 1):
2811 		adev->nbio.funcs = &nbif_v6_3_1_funcs;
2812 		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
2813 		break;
2814 	default:
2815 		break;
2816 	}
2817 
2818 	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2819 	case IP_VERSION(4, 0, 0):
2820 	case IP_VERSION(4, 0, 1):
2821 	case IP_VERSION(4, 1, 0):
2822 	case IP_VERSION(4, 1, 1):
2823 	case IP_VERSION(4, 1, 2):
2824 	case IP_VERSION(4, 2, 0):
2825 	case IP_VERSION(4, 2, 1):
2826 	case IP_VERSION(4, 4, 0):
2827 	case IP_VERSION(4, 4, 2):
2828 	case IP_VERSION(4, 4, 5):
2829 		adev->hdp.funcs = &hdp_v4_0_funcs;
2830 		break;
2831 	case IP_VERSION(5, 0, 0):
2832 	case IP_VERSION(5, 0, 1):
2833 	case IP_VERSION(5, 0, 2):
2834 	case IP_VERSION(5, 0, 3):
2835 	case IP_VERSION(5, 0, 4):
2836 	case IP_VERSION(5, 2, 0):
2837 		adev->hdp.funcs = &hdp_v5_0_funcs;
2838 		break;
2839 	case IP_VERSION(5, 2, 1):
2840 		adev->hdp.funcs = &hdp_v5_2_funcs;
2841 		break;
2842 	case IP_VERSION(6, 0, 0):
2843 	case IP_VERSION(6, 0, 1):
2844 	case IP_VERSION(6, 1, 0):
2845 		adev->hdp.funcs = &hdp_v6_0_funcs;
2846 		break;
2847 	case IP_VERSION(7, 0, 0):
2848 		adev->hdp.funcs = &hdp_v7_0_funcs;
2849 		break;
2850 	default:
2851 		break;
2852 	}
2853 
2854 	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
2855 	case IP_VERSION(3, 6, 0):
2856 	case IP_VERSION(3, 6, 1):
2857 	case IP_VERSION(3, 6, 2):
2858 		adev->df.funcs = &df_v3_6_funcs;
2859 		break;
2860 	case IP_VERSION(2, 1, 0):
2861 	case IP_VERSION(2, 1, 1):
2862 	case IP_VERSION(2, 5, 0):
2863 	case IP_VERSION(3, 5, 1):
2864 	case IP_VERSION(3, 5, 2):
2865 		adev->df.funcs = &df_v1_7_funcs;
2866 		break;
2867 	case IP_VERSION(4, 3, 0):
2868 		adev->df.funcs = &df_v4_3_funcs;
2869 		break;
2870 	case IP_VERSION(4, 6, 2):
2871 		adev->df.funcs = &df_v4_6_2_funcs;
2872 		break;
2873 	case IP_VERSION(4, 15, 0):
2874 	case IP_VERSION(4, 15, 1):
2875 		adev->df.funcs = &df_v4_15_funcs;
2876 		break;
2877 	default:
2878 		break;
2879 	}
2880 
2881 	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
2882 	case IP_VERSION(9, 0, 0):
2883 	case IP_VERSION(9, 0, 1):
2884 	case IP_VERSION(10, 0, 0):
2885 	case IP_VERSION(10, 0, 1):
2886 	case IP_VERSION(10, 0, 2):
2887 		adev->smuio.funcs = &smuio_v9_0_funcs;
2888 		break;
2889 	case IP_VERSION(11, 0, 0):
2890 	case IP_VERSION(11, 0, 2):
2891 	case IP_VERSION(11, 0, 3):
2892 	case IP_VERSION(11, 0, 4):
2893 	case IP_VERSION(11, 0, 7):
2894 	case IP_VERSION(11, 0, 8):
2895 		adev->smuio.funcs = &smuio_v11_0_funcs;
2896 		break;
2897 	case IP_VERSION(11, 0, 6):
2898 	case IP_VERSION(11, 0, 10):
2899 	case IP_VERSION(11, 0, 11):
2900 	case IP_VERSION(11, 5, 0):
2901 	case IP_VERSION(13, 0, 1):
2902 	case IP_VERSION(13, 0, 9):
2903 	case IP_VERSION(13, 0, 10):
2904 		adev->smuio.funcs = &smuio_v11_0_6_funcs;
2905 		break;
2906 	case IP_VERSION(13, 0, 2):
2907 		adev->smuio.funcs = &smuio_v13_0_funcs;
2908 		break;
2909 	case IP_VERSION(13, 0, 3):
2910 		adev->smuio.funcs = &smuio_v13_0_3_funcs;
2911 		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
2912 			adev->flags |= AMD_IS_APU;
2913 		}
2914 		break;
2915 	case IP_VERSION(13, 0, 6):
2916 	case IP_VERSION(13, 0, 8):
2917 	case IP_VERSION(14, 0, 0):
2918 	case IP_VERSION(14, 0, 1):
2919 		adev->smuio.funcs = &smuio_v13_0_6_funcs;
2920 		break;
2921 	case IP_VERSION(14, 0, 2):
2922 		adev->smuio.funcs = &smuio_v14_0_2_funcs;
2923 		break;
2924 	default:
2925 		break;
2926 	}
2927 
2928 	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
2929 	case IP_VERSION(6, 0, 0):
2930 	case IP_VERSION(6, 0, 1):
2931 	case IP_VERSION(6, 0, 2):
2932 	case IP_VERSION(6, 0, 3):
2933 		adev->lsdma.funcs = &lsdma_v6_0_funcs;
2934 		break;
2935 	case IP_VERSION(7, 0, 0):
2936 	case IP_VERSION(7, 0, 1):
2937 		adev->lsdma.funcs = &lsdma_v7_0_funcs;
2938 		break;
2939 	default:
2940 		break;
2941 	}
2942 
2943 	r = amdgpu_discovery_set_common_ip_blocks(adev);
2944 	if (r)
2945 		return r;
2946 
2947 	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
2948 	if (r)
2949 		return r;
2950 
2951 	/* For SR-IOV, PSP needs to be initialized before IH */
2952 	if (amdgpu_sriov_vf(adev)) {
2953 		r = amdgpu_discovery_set_psp_ip_blocks(adev);
2954 		if (r)
2955 			return r;
2956 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2957 		if (r)
2958 			return r;
2959 	} else {
2960 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2961 		if (r)
2962 			return r;
2963 
2964 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2965 			r = amdgpu_discovery_set_psp_ip_blocks(adev);
2966 			if (r)
2967 				return r;
2968 		}
2969 	}
2970 
2971 	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2972 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2973 		if (r)
2974 			return r;
2975 	}
2976 
2977 	r = amdgpu_discovery_set_display_ip_blocks(adev);
2978 	if (r)
2979 		return r;
2980 
2981 	r = amdgpu_discovery_set_gc_ip_blocks(adev);
2982 	if (r)
2983 		return r;
2984 
2985 	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
2986 	if (r)
2987 		return r;
2988 
2989 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
2990 	     !amdgpu_sriov_vf(adev)) ||
2991 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
2992 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2993 		if (r)
2994 			return r;
2995 	}
2996 
2997 	r = amdgpu_discovery_set_mm_ip_blocks(adev);
2998 	if (r)
2999 		return r;
3000 
3001 	r = amdgpu_discovery_set_mes_ip_blocks(adev);
3002 	if (r)
3003 		return r;
3004 
3005 	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
3006 	if (r)
3007 		return r;
3008 
3009 	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
3010 	if (r)
3011 		return r;
3012 
3013 	r = amdgpu_discovery_set_isp_ip_blocks(adev);
3014 	if (r)
3015 		return r;
3016 	return 0;
3017 }
3018 
3019