1 /*
2 * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/firmware.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30 #include "amdgpu_ras.h"
31
32 #include "soc15.h"
33 #include "gfx_v9_0.h"
34 #include "gfx_v9_4_3.h"
35 #include "gmc_v9_0.h"
36 #include "df_v1_7.h"
37 #include "df_v3_6.h"
38 #include "df_v4_3.h"
39 #include "df_v4_6_2.h"
40 #include "df_v4_15.h"
41 #include "nbio_v6_1.h"
42 #include "nbio_v7_0.h"
43 #include "nbio_v7_4.h"
44 #include "nbio_v7_9.h"
45 #include "nbio_v7_11.h"
46 #include "hdp_v4_0.h"
47 #include "vega10_ih.h"
48 #include "vega20_ih.h"
49 #include "sdma_v4_0.h"
50 #include "sdma_v4_4_2.h"
51 #include "uvd_v7_0.h"
52 #include "vce_v4_0.h"
53 #include "vcn_v1_0.h"
54 #include "vcn_v2_5.h"
55 #include "jpeg_v2_5.h"
56 #include "smuio_v9_0.h"
57 #include "gmc_v10_0.h"
58 #include "gmc_v11_0.h"
59 #include "gmc_v12_0.h"
60 #include "gfxhub_v2_0.h"
61 #include "mmhub_v2_0.h"
62 #include "nbio_v2_3.h"
63 #include "nbio_v4_3.h"
64 #include "nbio_v7_2.h"
65 #include "nbio_v7_7.h"
66 #include "nbif_v6_3_1.h"
67 #include "hdp_v5_0.h"
68 #include "hdp_v5_2.h"
69 #include "hdp_v6_0.h"
70 #include "hdp_v7_0.h"
71 #include "nv.h"
72 #include "soc21.h"
73 #include "soc24.h"
74 #include "navi10_ih.h"
75 #include "ih_v6_0.h"
76 #include "ih_v6_1.h"
77 #include "ih_v7_0.h"
78 #include "gfx_v10_0.h"
79 #include "gfx_v11_0.h"
80 #include "gfx_v12_0.h"
81 #include "sdma_v5_0.h"
82 #include "sdma_v5_2.h"
83 #include "sdma_v6_0.h"
84 #include "sdma_v7_0.h"
85 #include "lsdma_v6_0.h"
86 #include "lsdma_v7_0.h"
87 #include "vcn_v2_0.h"
88 #include "jpeg_v2_0.h"
89 #include "vcn_v3_0.h"
90 #include "jpeg_v3_0.h"
91 #include "vcn_v4_0.h"
92 #include "jpeg_v4_0.h"
93 #include "vcn_v4_0_3.h"
94 #include "jpeg_v4_0_3.h"
95 #include "vcn_v4_0_5.h"
96 #include "jpeg_v4_0_5.h"
97 #include "amdgpu_vkms.h"
98 #include "mes_v11_0.h"
99 #include "mes_v12_0.h"
100 #include "smuio_v11_0.h"
101 #include "smuio_v11_0_6.h"
102 #include "smuio_v13_0.h"
103 #include "smuio_v13_0_3.h"
104 #include "smuio_v13_0_6.h"
105 #include "smuio_v14_0_2.h"
106 #include "vcn_v5_0_0.h"
107 #include "vcn_v5_0_1.h"
108 #include "jpeg_v5_0_0.h"
109 #include "jpeg_v5_0_1.h"
110
111 #include "amdgpu_vpe.h"
112 #if defined(CONFIG_DRM_AMD_ISP)
113 #include "amdgpu_isp.h"
114 #endif
115
116 MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
117 MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
118 MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
119 MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
120 MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
121 MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
122 MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
123 MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
124 MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
125
126 #define mmIP_DISCOVERY_VERSION 0x16A00
127 #define mmRCC_CONFIG_MEMSIZE 0xde3
128 #define mmMP0_SMN_C2PMSG_33 0x16061
129 #define mmMM_INDEX 0x0
130 #define mmMM_INDEX_HI 0x6
131 #define mmMM_DATA 0x1
132
133 static const char *hw_id_names[HW_ID_MAX] = {
134 [MP1_HWID] = "MP1",
135 [MP2_HWID] = "MP2",
136 [THM_HWID] = "THM",
137 [SMUIO_HWID] = "SMUIO",
138 [FUSE_HWID] = "FUSE",
139 [CLKA_HWID] = "CLKA",
140 [PWR_HWID] = "PWR",
141 [GC_HWID] = "GC",
142 [UVD_HWID] = "UVD",
143 [AUDIO_AZ_HWID] = "AUDIO_AZ",
144 [ACP_HWID] = "ACP",
145 [DCI_HWID] = "DCI",
146 [DMU_HWID] = "DMU",
147 [DCO_HWID] = "DCO",
148 [DIO_HWID] = "DIO",
149 [XDMA_HWID] = "XDMA",
150 [DCEAZ_HWID] = "DCEAZ",
151 [DAZ_HWID] = "DAZ",
152 [SDPMUX_HWID] = "SDPMUX",
153 [NTB_HWID] = "NTB",
154 [IOHC_HWID] = "IOHC",
155 [L2IMU_HWID] = "L2IMU",
156 [VCE_HWID] = "VCE",
157 [MMHUB_HWID] = "MMHUB",
158 [ATHUB_HWID] = "ATHUB",
159 [DBGU_NBIO_HWID] = "DBGU_NBIO",
160 [DFX_HWID] = "DFX",
161 [DBGU0_HWID] = "DBGU0",
162 [DBGU1_HWID] = "DBGU1",
163 [OSSSYS_HWID] = "OSSSYS",
164 [HDP_HWID] = "HDP",
165 [SDMA0_HWID] = "SDMA0",
166 [SDMA1_HWID] = "SDMA1",
167 [SDMA2_HWID] = "SDMA2",
168 [SDMA3_HWID] = "SDMA3",
169 [LSDMA_HWID] = "LSDMA",
170 [ISP_HWID] = "ISP",
171 [DBGU_IO_HWID] = "DBGU_IO",
172 [DF_HWID] = "DF",
173 [CLKB_HWID] = "CLKB",
174 [FCH_HWID] = "FCH",
175 [DFX_DAP_HWID] = "DFX_DAP",
176 [L1IMU_PCIE_HWID] = "L1IMU_PCIE",
177 [L1IMU_NBIF_HWID] = "L1IMU_NBIF",
178 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
179 [L1IMU3_HWID] = "L1IMU3",
180 [L1IMU4_HWID] = "L1IMU4",
181 [L1IMU5_HWID] = "L1IMU5",
182 [L1IMU6_HWID] = "L1IMU6",
183 [L1IMU7_HWID] = "L1IMU7",
184 [L1IMU8_HWID] = "L1IMU8",
185 [L1IMU9_HWID] = "L1IMU9",
186 [L1IMU10_HWID] = "L1IMU10",
187 [L1IMU11_HWID] = "L1IMU11",
188 [L1IMU12_HWID] = "L1IMU12",
189 [L1IMU13_HWID] = "L1IMU13",
190 [L1IMU14_HWID] = "L1IMU14",
191 [L1IMU15_HWID] = "L1IMU15",
192 [WAFLC_HWID] = "WAFLC",
193 [FCH_USB_PD_HWID] = "FCH_USB_PD",
194 [PCIE_HWID] = "PCIE",
195 [PCS_HWID] = "PCS",
196 [DDCL_HWID] = "DDCL",
197 [SST_HWID] = "SST",
198 [IOAGR_HWID] = "IOAGR",
199 [NBIF_HWID] = "NBIF",
200 [IOAPIC_HWID] = "IOAPIC",
201 [SYSTEMHUB_HWID] = "SYSTEMHUB",
202 [NTBCCP_HWID] = "NTBCCP",
203 [UMC_HWID] = "UMC",
204 [SATA_HWID] = "SATA",
205 [USB_HWID] = "USB",
206 [CCXSEC_HWID] = "CCXSEC",
207 [XGMI_HWID] = "XGMI",
208 [XGBE_HWID] = "XGBE",
209 [MP0_HWID] = "MP0",
210 [VPE_HWID] = "VPE",
211 };
212
213 static int hw_id_map[MAX_HWIP] = {
214 [GC_HWIP] = GC_HWID,
215 [HDP_HWIP] = HDP_HWID,
216 [SDMA0_HWIP] = SDMA0_HWID,
217 [SDMA1_HWIP] = SDMA1_HWID,
218 [SDMA2_HWIP] = SDMA2_HWID,
219 [SDMA3_HWIP] = SDMA3_HWID,
220 [LSDMA_HWIP] = LSDMA_HWID,
221 [MMHUB_HWIP] = MMHUB_HWID,
222 [ATHUB_HWIP] = ATHUB_HWID,
223 [NBIO_HWIP] = NBIF_HWID,
224 [MP0_HWIP] = MP0_HWID,
225 [MP1_HWIP] = MP1_HWID,
226 [UVD_HWIP] = UVD_HWID,
227 [VCE_HWIP] = VCE_HWID,
228 [DF_HWIP] = DF_HWID,
229 [DCE_HWIP] = DMU_HWID,
230 [OSSSYS_HWIP] = OSSSYS_HWID,
231 [SMUIO_HWIP] = SMUIO_HWID,
232 [PWR_HWIP] = PWR_HWID,
233 [NBIF_HWIP] = NBIF_HWID,
234 [THM_HWIP] = THM_HWID,
235 [CLK_HWIP] = CLKA_HWID,
236 [UMC_HWIP] = UMC_HWID,
237 [XGMI_HWIP] = XGMI_HWID,
238 [DCI_HWIP] = DCI_HWID,
239 [PCIE_HWIP] = PCIE_HWID,
240 [VPE_HWIP] = VPE_HWID,
241 [ISP_HWIP] = ISP_HWID,
242 };
243
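/* Fallback path for the IP discovery blob: map the ACPI-reported TMR
 * region in system memory and copy the discovery data out of it.
 */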
static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
245 {
246 u64 tmr_offset, tmr_size, pos;
247 void *discv_regn;
248 int ret;
249
250 ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
251 if (ret)
252 return ret;
253
254 pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
255
256 /* This region is read-only and reserved from system use */
257 discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
258 if (discv_regn) {
259 memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
260 memunmap(discv_regn);
261 return 0;
262 }
263
264 return -ENOENT;
265 }
266
267 #define IP_DISCOVERY_V2 2
268 #define IP_DISCOVERY_V4 4
269
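/* Read the IP discovery blob from the reserved region at the top of VRAM.
 * If the reported VRAM size is not valid, fall back to reading it from
 * system memory via the ACPI TMR info.
 */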
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
272 {
273 bool sz_valid = true;
274 uint64_t vram_size;
275 int i, ret = 0;
276 u32 msg;
277
278 if (!amdgpu_sriov_vf(adev)) {
279 /* It can take up to a second for IFWI init to complete on some dGPUs,
280 * but generally it should be in the 60-100ms range. Normally this starts
281 * as soon as the device gets power so by the time the OS loads this has long
282 * completed. However, when a card is hotplugged via e.g., USB4, we need to
283 * wait for this to complete. Once the C2PMSG is updated, we can
284 * continue.
285 */
286
287 for (i = 0; i < 1000; i++) {
288 msg = RREG32(mmMP0_SMN_C2PMSG_33);
289 if (msg & 0x80000000)
290 break;
291 msleep(1);
292 }
293 }
294
295 vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
296 if (!vram_size || vram_size == U32_MAX)
297 sz_valid = false;
298 else
299 vram_size <<= 20;
300
301 if (sz_valid) {
302 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
303 amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
304 adev->mman.discovery_tmr_size, false);
305 } else {
306 ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
307 }
308
309 if (ret)
310 dev_err(adev->dev,
311 "failed to read discovery info from memory, vram size read: %llx",
312 vram_size);
313
314 return ret;
315 }
316
static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
						  uint8_t *binary,
						  const char *fw_name)
320 {
321 const struct firmware *fw;
322 int r;
323
324 r = firmware_request_nowarn(&fw, fw_name, adev->dev);
325 if (r) {
326 if (amdgpu_discovery == 2)
327 dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
328 else
329 drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
330 return r;
331 }
332
333 memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
334 release_firmware(fw);
335
336 return 0;
337 }
338
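/* The discovery tables use a simple byte-wise additive checksum. */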
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
340 {
341 uint16_t checksum = 0;
342 int i;
343
344 for (i = 0; i < size; i++)
345 checksum += data[i];
346
347 return checksum;
348 }
349
static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
352 {
353 return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
354 }
355
static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
357 {
358 struct binary_header *bhdr;
359 bhdr = (struct binary_header *)binary;
360
361 return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
362 }
363
static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
365 {
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table for the VCN config.
	 */
370 if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
371 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
372 switch (adev->pdev->revision) {
373 case 0xC1:
374 case 0xC2:
375 case 0xC3:
376 case 0xC5:
377 case 0xC7:
378 case 0xCF:
379 case 0xDF:
380 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
381 adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
382 break;
383 default:
384 break;
385 }
386 }
387 }
388
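/* Validate the NPS info table against the id and checksum recorded in
 * the binary header's table list.
 */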
static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
					   struct binary_header *bhdr)
391 {
392 struct table_info *info;
393 uint16_t checksum;
394 uint16_t offset;
395
396 info = &bhdr->table_list[NPS_INFO];
397 offset = le16_to_cpu(info->offset);
398 checksum = le16_to_cpu(info->checksum);
399
400 struct nps_info_header *nhdr =
401 (struct nps_info_header *)(adev->mman.discovery_bin + offset);
402
403 if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
404 dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
405 return -EINVAL;
406 }
407
408 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
409 le32_to_cpu(nhdr->size_bytes),
410 checksum)) {
411 dev_dbg(adev->dev, "invalid nps info data table checksum\n");
412 return -EINVAL;
413 }
414
415 return 0;
416 }
417
static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
419 {
420 if (amdgpu_discovery == 2)
421 return "amdgpu/ip_discovery.bin";
422
423 switch (adev->asic_type) {
424 case CHIP_VEGA10:
425 return "amdgpu/vega10_ip_discovery.bin";
426 case CHIP_VEGA12:
427 return "amdgpu/vega12_ip_discovery.bin";
428 case CHIP_RAVEN:
429 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
430 return "amdgpu/raven2_ip_discovery.bin";
431 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
432 return "amdgpu/picasso_ip_discovery.bin";
433 else
434 return "amdgpu/raven_ip_discovery.bin";
435 case CHIP_VEGA20:
436 return "amdgpu/vega20_ip_discovery.bin";
437 case CHIP_ARCTURUS:
438 return "amdgpu/arcturus_ip_discovery.bin";
439 case CHIP_ALDEBARAN:
440 return "amdgpu/aldebaran_ip_discovery.bin";
441 default:
442 return NULL;
443 }
444 }
445
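/* Fetch the IP discovery binary (from file or from the device) into a
 * kernel buffer and validate its signature plus the checksums of the
 * IP discovery, GC, harvest, VCN and MALL tables.
 */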
static int amdgpu_discovery_init(struct amdgpu_device *adev)
447 {
448 struct table_info *info;
449 struct binary_header *bhdr;
450 const char *fw_name;
451 uint16_t offset;
452 uint16_t size;
453 uint16_t checksum;
454 int r;
455
456 adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
457 adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
458 if (!adev->mman.discovery_bin)
459 return -ENOMEM;
460
461 /* Read from file if it is the preferred option */
462 fw_name = amdgpu_discovery_get_fw_name(adev);
463 if (fw_name != NULL) {
464 drm_dbg(&adev->ddev, "use ip discovery information from file");
465 r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
466 if (r)
467 goto out;
468 } else {
469 drm_dbg(&adev->ddev, "use ip discovery information from memory");
470 r = amdgpu_discovery_read_binary_from_mem(
471 adev, adev->mman.discovery_bin);
472 if (r)
473 goto out;
474 }
475
476 /* check the ip discovery binary signature */
477 if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
478 dev_err(adev->dev,
479 "get invalid ip discovery binary signature\n");
480 r = -EINVAL;
481 goto out;
482 }
483
484 bhdr = (struct binary_header *)adev->mman.discovery_bin;
485
486 offset = offsetof(struct binary_header, binary_checksum) +
487 sizeof(bhdr->binary_checksum);
488 size = le16_to_cpu(bhdr->binary_size) - offset;
489 checksum = le16_to_cpu(bhdr->binary_checksum);
490
491 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
492 size, checksum)) {
493 dev_err(adev->dev, "invalid ip discovery binary checksum\n");
494 r = -EINVAL;
495 goto out;
496 }
497
498 info = &bhdr->table_list[IP_DISCOVERY];
499 offset = le16_to_cpu(info->offset);
500 checksum = le16_to_cpu(info->checksum);
501
502 if (offset) {
503 struct ip_discovery_header *ihdr =
504 (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
505 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
506 dev_err(adev->dev, "invalid ip discovery data table signature\n");
507 r = -EINVAL;
508 goto out;
509 }
510
511 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
512 le16_to_cpu(ihdr->size), checksum)) {
513 dev_err(adev->dev, "invalid ip discovery data table checksum\n");
514 r = -EINVAL;
515 goto out;
516 }
517 }
518
519 info = &bhdr->table_list[GC];
520 offset = le16_to_cpu(info->offset);
521 checksum = le16_to_cpu(info->checksum);
522
523 if (offset) {
524 struct gpu_info_header *ghdr =
525 (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
526
527 if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
528 dev_err(adev->dev, "invalid ip discovery gc table id\n");
529 r = -EINVAL;
530 goto out;
531 }
532
533 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
534 le32_to_cpu(ghdr->size), checksum)) {
535 dev_err(adev->dev, "invalid gc data table checksum\n");
536 r = -EINVAL;
537 goto out;
538 }
539 }
540
541 info = &bhdr->table_list[HARVEST_INFO];
542 offset = le16_to_cpu(info->offset);
543 checksum = le16_to_cpu(info->checksum);
544
545 if (offset) {
546 struct harvest_info_header *hhdr =
547 (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
548
549 if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
550 dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
551 r = -EINVAL;
552 goto out;
553 }
554
555 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
556 sizeof(struct harvest_table), checksum)) {
557 dev_err(adev->dev, "invalid harvest data table checksum\n");
558 r = -EINVAL;
559 goto out;
560 }
561 }
562
563 info = &bhdr->table_list[VCN_INFO];
564 offset = le16_to_cpu(info->offset);
565 checksum = le16_to_cpu(info->checksum);
566
567 if (offset) {
568 struct vcn_info_header *vhdr =
569 (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
570
571 if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
572 dev_err(adev->dev, "invalid ip discovery vcn table id\n");
573 r = -EINVAL;
574 goto out;
575 }
576
577 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
578 le32_to_cpu(vhdr->size_bytes), checksum)) {
579 dev_err(adev->dev, "invalid vcn data table checksum\n");
580 r = -EINVAL;
581 goto out;
582 }
583 }
584
585 info = &bhdr->table_list[MALL_INFO];
586 offset = le16_to_cpu(info->offset);
587 checksum = le16_to_cpu(info->checksum);
588
589 if (0 && offset) {
590 struct mall_info_header *mhdr =
591 (struct mall_info_header *)(adev->mman.discovery_bin + offset);
592
593 if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
594 dev_err(adev->dev, "invalid ip discovery mall table id\n");
595 r = -EINVAL;
596 goto out;
597 }
598
599 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
600 le32_to_cpu(mhdr->size_bytes), checksum)) {
601 dev_err(adev->dev, "invalid mall data table checksum\n");
602 r = -EINVAL;
603 goto out;
604 }
605 }
606
607 return 0;
608
609 out:
610 kfree(adev->mman.discovery_bin);
611 adev->mman.discovery_bin = NULL;
612 if ((amdgpu_discovery != 2) &&
613 (RREG32(mmIP_DISCOVERY_VERSION) == 4))
614 amdgpu_ras_query_boot_status(adev, 4);
615 return r;
616 }
617
618 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
619
void amdgpu_discovery_fini(struct amdgpu_device *adev)
621 {
622 amdgpu_discovery_sysfs_fini(adev);
623 kfree(adev->mman.discovery_bin);
624 adev->mman.discovery_bin = NULL;
625 }
626
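/* Basic sanity check of one IP entry: instance and hw_id must be within
 * the ranges the driver can index.
 */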
static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
					uint8_t instance, uint16_t hw_id)
629 {
630 if (instance >= HWIP_MAX_INSTANCE) {
631 dev_err(adev->dev,
632 "Unexpected instance_number (%d) from ip discovery blob\n",
633 instance);
634 return -EINVAL;
635 }
636 if (hw_id >= HW_ID_MAX) {
637 dev_err(adev->dev,
638 "Unexpected hw_id (%d) from ip discovery blob\n",
639 hw_id);
640 return -EINVAL;
641 }
642
643 return 0;
644 }
645
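/* Legacy harvesting: walk every IP entry of every die and honor the
 * per-IP harvest bit (used on ASICs where the dedicated harvest table
 * is not usable).
 */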
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count)
648 {
649 struct binary_header *bhdr;
650 struct ip_discovery_header *ihdr;
651 struct die_header *dhdr;
652 struct ip *ip;
653 uint16_t die_offset, ip_offset, num_dies, num_ips;
654 uint16_t hw_id;
655 uint8_t inst;
656 int i, j;
657
658 bhdr = (struct binary_header *)adev->mman.discovery_bin;
659 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
660 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
661 num_dies = le16_to_cpu(ihdr->num_dies);
662
663 /* scan harvest bit of all IP data structures */
664 for (i = 0; i < num_dies; i++) {
665 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
666 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
667 num_ips = le16_to_cpu(dhdr->num_ips);
668 ip_offset = die_offset + sizeof(*dhdr);
669
670 for (j = 0; j < num_ips; j++) {
671 ip = (struct ip *)(adev->mman.discovery_bin +
672 ip_offset);
673 inst = ip->number_instance;
674 hw_id = le16_to_cpu(ip->hw_id);
675 if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
676 goto next_ip;
677
678 if (ip->harvest == 1) {
679 switch (hw_id) {
680 case VCN_HWID:
681 (*vcn_harvest_count)++;
682 if (inst == 0) {
683 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
684 adev->vcn.inst_mask &=
685 ~AMDGPU_VCN_HARVEST_VCN0;
686 adev->jpeg.inst_mask &=
687 ~AMDGPU_VCN_HARVEST_VCN0;
688 } else {
689 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
690 adev->vcn.inst_mask &=
691 ~AMDGPU_VCN_HARVEST_VCN1;
692 adev->jpeg.inst_mask &=
693 ~AMDGPU_VCN_HARVEST_VCN1;
694 }
695 break;
696 case DMU_HWID:
697 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
698 break;
699 default:
700 break;
701 }
702 }
703 next_ip:
704 ip_offset += struct_size(ip, base_address,
705 ip->num_base_address);
706 }
707 }
708 }
709
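/* Newer ASICs: derive VCN/JPEG/UMC/GC/SDMA/ISP harvesting from the
 * dedicated harvest table in the discovery binary.
 */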
static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
713 {
714 struct binary_header *bhdr;
715 struct harvest_table *harvest_info;
716 u16 offset;
717 int i;
718 uint32_t umc_harvest_config = 0;
719
720 bhdr = (struct binary_header *)adev->mman.discovery_bin;
721 offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
722
723 if (!offset) {
724 dev_err(adev->dev, "invalid harvest table offset\n");
725 return;
726 }
727
728 harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
729
730 for (i = 0; i < 32; i++) {
731 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
732 break;
733
734 switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
735 case VCN_HWID:
736 (*vcn_harvest_count)++;
737 adev->vcn.harvest_config |=
738 (1 << harvest_info->list[i].number_instance);
739 adev->jpeg.harvest_config |=
740 (1 << harvest_info->list[i].number_instance);
741
742 adev->vcn.inst_mask &=
743 ~(1U << harvest_info->list[i].number_instance);
744 adev->jpeg.inst_mask &=
745 ~(1U << harvest_info->list[i].number_instance);
746 break;
747 case DMU_HWID:
748 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
749 break;
750 case UMC_HWID:
751 umc_harvest_config |=
752 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
753 (*umc_harvest_count)++;
754 break;
755 case GC_HWID:
756 adev->gfx.xcc_mask &=
757 ~(1U << harvest_info->list[i].number_instance);
758 break;
759 case SDMA0_HWID:
760 adev->sdma.sdma_mask &=
761 ~(1U << harvest_info->list[i].number_instance);
762 break;
763 #if defined(CONFIG_DRM_AMD_ISP)
764 case ISP_HWID:
765 adev->isp.harvest_config |=
766 ~(1U << harvest_info->list[i].number_instance);
767 break;
768 #endif
769 default:
770 break;
771 }
772 }
773
774 adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
775 ~umc_harvest_config;
776 }
777
778 /* ================================================== */
779
780 struct ip_hw_instance {
781 struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
782
783 int hw_id;
784 u8 num_instance;
785 u8 major, minor, revision;
786 u8 harvest;
787
788 int num_base_addresses;
789 u32 base_addr[] __counted_by(num_base_addresses);
790 };
791
792 struct ip_hw_id {
793 struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
794 int hw_id;
795 };
796
797 struct ip_die_entry {
798 struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
799 u16 num_ips;
800 };
801
802 /* -------------------------------------------------- */
803
804 struct ip_hw_instance_attr {
805 struct attribute attr;
806 ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
807 };
808
static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
810 {
811 return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
812 }
813
static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
815 {
816 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
817 }
818
static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
820 {
821 return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
822 }
823
static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
825 {
826 return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
827 }
828
static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
830 {
831 return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
832 }
833
static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
835 {
836 return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
837 }
838
static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
840 {
841 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
842 }
843
static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
845 {
846 ssize_t res, at;
847 int ii;
848
849 for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that at + size <= PAGE_SIZE.
		 */
852 if (at + 12 > PAGE_SIZE)
853 break;
854 res = sysfs_emit_at(buf, at, "0x%08X\n",
855 ip_hw_instance->base_addr[ii]);
856 if (res <= 0)
857 break;
858 at += res;
859 }
860
861 return res < 0 ? res : at;
862 }
863
864 static struct ip_hw_instance_attr ip_hw_attr[] = {
865 __ATTR_RO(hw_id),
866 __ATTR_RO(num_instance),
867 __ATTR_RO(major),
868 __ATTR_RO(minor),
869 __ATTR_RO(revision),
870 __ATTR_RO(harvest),
871 __ATTR_RO(num_base_addresses),
872 __ATTR_RO(base_addr),
873 };
874
875 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
876 ATTRIBUTE_GROUPS(ip_hw_instance);
877
878 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
879 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
880
static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
884 {
885 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
886 struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
887
888 if (!ip_hw_attr->show)
889 return -EIO;
890
891 return ip_hw_attr->show(ip_hw_instance, buf);
892 }
893
894 static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
895 .show = ip_hw_instance_attr_show,
896 };
897
static void ip_hw_instance_release(struct kobject *kobj)
899 {
900 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
901
902 kfree(ip_hw_instance);
903 }
904
905 static const struct kobj_type ip_hw_instance_ktype = {
906 .release = ip_hw_instance_release,
907 .sysfs_ops = &ip_hw_instance_sysfs_ops,
908 .default_groups = ip_hw_instance_groups,
909 };
910
911 /* -------------------------------------------------- */
912
913 #define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
914
static void ip_hw_id_release(struct kobject *kobj)
916 {
917 struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
918
919 if (!list_empty(&ip_hw_id->hw_id_kset.list))
920 DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
921 kfree(ip_hw_id);
922 }
923
924 static const struct kobj_type ip_hw_id_ktype = {
925 .release = ip_hw_id_release,
926 .sysfs_ops = &kobj_sysfs_ops,
927 };
928
929 /* -------------------------------------------------- */
930
931 static void die_kobj_release(struct kobject *kobj);
932 static void ip_disc_release(struct kobject *kobj);
933
934 struct ip_die_entry_attribute {
935 struct attribute attr;
936 ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
937 };
938
939 #define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
940
static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
942 {
943 return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
944 }
945
/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
950 static struct ip_die_entry_attribute num_ips_attr =
951 __ATTR_RO(num_ips);
952
953 static struct attribute *ip_die_entry_attrs[] = {
954 &num_ips_attr.attr,
955 NULL,
956 };
957 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
958
959 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
960
static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
964 {
965 struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
966 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
967
968 if (!ip_die_entry_attr->show)
969 return -EIO;
970
971 return ip_die_entry_attr->show(ip_die_entry, buf);
972 }
973
static void ip_die_entry_release(struct kobject *kobj)
975 {
976 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
977
978 if (!list_empty(&ip_die_entry->ip_kset.list))
979 DRM_ERROR("ip_die_entry->ip_kset is not empty");
980 kfree(ip_die_entry);
981 }
982
983 static const struct sysfs_ops ip_die_entry_sysfs_ops = {
984 .show = ip_die_entry_attr_show,
985 };
986
987 static const struct kobj_type ip_die_entry_ktype = {
988 .release = ip_die_entry_release,
989 .sysfs_ops = &ip_die_entry_sysfs_ops,
990 .default_groups = ip_die_entry_groups,
991 };
992
993 static const struct kobj_type die_kobj_ktype = {
994 .release = die_kobj_release,
995 .sysfs_ops = &kobj_sysfs_ops,
996 };
997
998 static const struct kobj_type ip_discovery_ktype = {
999 .release = ip_disc_release,
1000 .sysfs_ops = &kobj_sysfs_ops,
1001 };
1002
1003 struct ip_discovery_top {
1004 struct kobject kobj; /* ip_discovery/ */
1005 struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
1006 struct amdgpu_device *adev;
1007 };
1008
static void die_kobj_release(struct kobject *kobj)
1010 {
1011 struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
1012 struct ip_discovery_top,
1013 die_kset);
1014 if (!list_empty(&ip_top->die_kset.list))
1015 DRM_ERROR("ip_top->die_kset is not empty");
1016 }
1017
static void ip_disc_release(struct kobject *kobj)
1019 {
1020 struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
1021 kobj);
1022 struct amdgpu_device *adev = ip_top->adev;
1023
1024 adev->ip_top = NULL;
1025 kfree(ip_top);
1026 }
1027
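/* Report, for sysfs, whether a given hw_id/instance has been harvested,
 * based on the masks computed earlier from the discovery data.
 */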
static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
1030 {
1031 uint8_t harvest = 0;
1032
	/* Until a uniform way is figured out, get the mask based on hwid */
1034 switch (hw_id) {
1035 case VCN_HWID:
1036 harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
1037 break;
1038 case DMU_HWID:
1039 if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
1040 harvest = 0x1;
1041 break;
1042 case UMC_HWID:
		/* TODO: this needs separate parsing; for now, ignore. */
1044 break;
1045 case GC_HWID:
1046 harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
1047 break;
1048 case SDMA0_HWID:
1049 harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
1050 break;
1051 default:
1052 break;
1053 }
1054
1055 return harvest;
1056 }
1057
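/* Create the sysfs hierarchy die/#die/#hw_id/#instance/<attrs> for all IP
 * instances listed for one die in the discovery binary.
 */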
static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
1062 {
1063 int ii, jj, kk, res;
1064 uint16_t hw_id;
1065 uint8_t inst;
1066
1067 DRM_DEBUG("num_ips:%d", num_ips);
1068
1069 /* Find all IPs of a given HW ID, and add their instance to
1070 * #die/#hw_id/#instance/<attributes>
1071 */
1072 for (ii = 0; ii < HW_ID_MAX; ii++) {
1073 struct ip_hw_id *ip_hw_id = NULL;
1074 size_t ip_offset = _ip_offset;
1075
1076 for (jj = 0; jj < num_ips; jj++) {
1077 struct ip_v4 *ip;
1078 struct ip_hw_instance *ip_hw_instance;
1079
1080 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1081 inst = ip->instance_number;
1082 hw_id = le16_to_cpu(ip->hw_id);
1083 if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
1084 hw_id != ii)
1085 goto next_ip;
1086
1087 DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
1088
1089 /* We have a hw_id match; register the hw
1090 * block if not yet registered.
1091 */
1092 if (!ip_hw_id) {
1093 ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
1094 if (!ip_hw_id)
1095 return -ENOMEM;
1096 ip_hw_id->hw_id = ii;
1097
1098 kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
1099 ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
1100 ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
1101 res = kset_register(&ip_hw_id->hw_id_kset);
1102 if (res) {
1103 DRM_ERROR("Couldn't register ip_hw_id kset");
1104 kfree(ip_hw_id);
1105 return res;
1106 }
1107 if (hw_id_names[ii]) {
1108 res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
1109 &ip_hw_id->hw_id_kset.kobj,
1110 hw_id_names[ii]);
1111 if (res) {
1112 DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
1113 hw_id_names[ii],
1114 kobject_name(&ip_die_entry->ip_kset.kobj));
1115 }
1116 }
1117 }
1118
1119 /* Now register its instance.
1120 */
1121 ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
1122 base_addr,
1123 ip->num_base_address),
1124 GFP_KERNEL);
1125 if (!ip_hw_instance) {
1126 DRM_ERROR("no memory for ip_hw_instance");
1127 return -ENOMEM;
1128 }
1129 ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1130 ip_hw_instance->num_instance = ip->instance_number;
1131 ip_hw_instance->major = ip->major;
1132 ip_hw_instance->minor = ip->minor;
1133 ip_hw_instance->revision = ip->revision;
1134 ip_hw_instance->harvest =
1135 amdgpu_discovery_get_harvest_info(
1136 adev, ip_hw_instance->hw_id,
1137 ip_hw_instance->num_instance);
1138 ip_hw_instance->num_base_addresses = ip->num_base_address;
1139
1140 for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1141 if (reg_base_64)
1142 ip_hw_instance->base_addr[kk] =
1143 lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1144 else
1145 ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1146 }
1147
1148 kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1149 ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1150 res = kobject_add(&ip_hw_instance->kobj, NULL,
1151 "%d", ip_hw_instance->num_instance);
1152 next_ip:
1153 if (reg_base_64)
1154 ip_offset += struct_size(ip, base_address_64,
1155 ip->num_base_address);
1156 else
1157 ip_offset += struct_size(ip, base_address,
1158 ip->num_base_address);
1159 }
1160 }
1161
1162 return 0;
1163 }
1164
static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1166 {
1167 struct binary_header *bhdr;
1168 struct ip_discovery_header *ihdr;
1169 struct die_header *dhdr;
1170 struct kset *die_kset = &adev->ip_top->die_kset;
1171 u16 num_dies, die_offset, num_ips;
1172 size_t ip_offset;
1173 int ii, res;
1174
1175 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1176 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1177 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1178 num_dies = le16_to_cpu(ihdr->num_dies);
1179
1180 DRM_DEBUG("number of dies: %d\n", num_dies);
1181
1182 for (ii = 0; ii < num_dies; ii++) {
1183 struct ip_die_entry *ip_die_entry;
1184
1185 die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1186 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1187 num_ips = le16_to_cpu(dhdr->num_ips);
1188 ip_offset = die_offset + sizeof(*dhdr);
1189
1190 /* Add the die to the kset.
1191 *
1192 * dhdr->die_id == ii, which was checked in
1193 * amdgpu_discovery_reg_base_init().
1194 */
1195
1196 ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1197 if (!ip_die_entry)
1198 return -ENOMEM;
1199
1200 ip_die_entry->num_ips = num_ips;
1201
1202 kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1203 ip_die_entry->ip_kset.kobj.kset = die_kset;
1204 ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1205 res = kset_register(&ip_die_entry->ip_kset);
1206 if (res) {
1207 DRM_ERROR("Couldn't register ip_die_entry kset");
1208 kfree(ip_die_entry);
1209 return res;
1210 }
1211
1212 amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1213 }
1214
1215 return 0;
1216 }
1217
static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1219 {
1220 struct kset *die_kset;
1221 int res, ii;
1222
1223 if (!adev->mman.discovery_bin)
1224 return -EINVAL;
1225
1226 adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1227 if (!adev->ip_top)
1228 return -ENOMEM;
1229
1230 adev->ip_top->adev = adev;
1231
1232 res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1233 &adev->dev->kobj, "ip_discovery");
1234 if (res) {
1235 DRM_ERROR("Couldn't init and add ip_discovery/");
1236 goto Err;
1237 }
1238
1239 die_kset = &adev->ip_top->die_kset;
1240 kobject_set_name(&die_kset->kobj, "%s", "die");
1241 die_kset->kobj.parent = &adev->ip_top->kobj;
1242 die_kset->kobj.ktype = &die_kobj_ktype;
1243 res = kset_register(&adev->ip_top->die_kset);
1244 if (res) {
1245 DRM_ERROR("Couldn't register die_kset");
1246 goto Err;
1247 }
1248
1249 for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1250 ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1251 ip_hw_instance_attrs[ii] = NULL;
1252
1253 res = amdgpu_discovery_sysfs_recurse(adev);
1254
1255 return res;
1256 Err:
1257 kobject_put(&adev->ip_top->kobj);
1258 return res;
1259 }
1260
1261 /* -------------------------------------------------- */
1262
1263 #define list_to_kobj(el) container_of(el, struct kobject, entry)
1264
static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1266 {
1267 struct list_head *el, *tmp;
1268 struct kset *hw_id_kset;
1269
1270 hw_id_kset = &ip_hw_id->hw_id_kset;
1271 spin_lock(&hw_id_kset->list_lock);
1272 list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1273 list_del_init(el);
1274 spin_unlock(&hw_id_kset->list_lock);
1275 /* kobject is embedded in ip_hw_instance */
1276 kobject_put(list_to_kobj(el));
1277 spin_lock(&hw_id_kset->list_lock);
1278 }
1279 spin_unlock(&hw_id_kset->list_lock);
1280 kobject_put(&ip_hw_id->hw_id_kset.kobj);
1281 }
1282
static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1284 {
1285 struct list_head *el, *tmp;
1286 struct kset *ip_kset;
1287
1288 ip_kset = &ip_die_entry->ip_kset;
1289 spin_lock(&ip_kset->list_lock);
1290 list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1291 list_del_init(el);
1292 spin_unlock(&ip_kset->list_lock);
1293 amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1294 spin_lock(&ip_kset->list_lock);
1295 }
1296 spin_unlock(&ip_kset->list_lock);
1297 kobject_put(&ip_die_entry->ip_kset.kobj);
1298 }
1299
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1301 {
1302 struct list_head *el, *tmp;
1303 struct kset *die_kset;
1304
1305 die_kset = &adev->ip_top->die_kset;
1306 spin_lock(&die_kset->list_lock);
1307 list_for_each_prev_safe(el, tmp, &die_kset->list) {
1308 list_del_init(el);
1309 spin_unlock(&die_kset->list_lock);
1310 amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1311 spin_lock(&die_kset->list_lock);
1312 }
1313 spin_unlock(&die_kset->list_lock);
1314 kobject_put(&adev->ip_top->die_kset.kobj);
1315 kobject_put(&adev->ip_top->kobj);
1316 }
1317
1318 /* ================================================== */
1319
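/* Walk the discovery binary and populate adev->reg_offset[] and
 * adev->ip_versions[] for every IP instance, counting VCN/SDMA/VPE/UMC
 * instances and recording the GC/SDMA/VCN/JPEG instance masks along the way.
 */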
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1321 {
1322 uint8_t num_base_address, subrev, variant;
1323 struct binary_header *bhdr;
1324 struct ip_discovery_header *ihdr;
1325 struct die_header *dhdr;
1326 struct ip_v4 *ip;
1327 uint16_t die_offset;
1328 uint16_t ip_offset;
1329 uint16_t num_dies;
1330 uint32_t wafl_ver;
1331 uint16_t num_ips;
1332 uint16_t hw_id;
1333 uint8_t inst;
1334 int hw_ip;
1335 int i, j, k;
1336 int r;
1337
1338 r = amdgpu_discovery_init(adev);
1339 if (r)
1340 return r;
1341
1342 wafl_ver = 0;
1343 adev->gfx.xcc_mask = 0;
1344 adev->sdma.sdma_mask = 0;
1345 adev->vcn.inst_mask = 0;
1346 adev->jpeg.inst_mask = 0;
1347 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1348 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1349 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1350 num_dies = le16_to_cpu(ihdr->num_dies);
1351
1352 DRM_DEBUG("number of dies: %d\n", num_dies);
1353
1354 for (i = 0; i < num_dies; i++) {
1355 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1356 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1357 num_ips = le16_to_cpu(dhdr->num_ips);
1358 ip_offset = die_offset + sizeof(*dhdr);
1359
1360 if (le16_to_cpu(dhdr->die_id) != i) {
1361 DRM_ERROR("invalid die id %d, expected %d\n",
1362 le16_to_cpu(dhdr->die_id), i);
1363 return -EINVAL;
1364 }
1365
1366 DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1367 le16_to_cpu(dhdr->die_id), num_ips);
1368
1369 for (j = 0; j < num_ips; j++) {
1370 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1371
1372 inst = ip->instance_number;
1373 hw_id = le16_to_cpu(ip->hw_id);
1374 if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
1375 goto next_ip;
1376
1377 num_base_address = ip->num_base_address;
1378
1379 DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1380 hw_id_names[le16_to_cpu(ip->hw_id)],
1381 le16_to_cpu(ip->hw_id),
1382 ip->instance_number,
1383 ip->major, ip->minor,
1384 ip->revision);
1385
1386 if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1387 /* Bit [5:0]: original revision value
1388 * Bit [7:6]: en/decode capability:
1389 * 0b00 : VCN function normally
1390 * 0b10 : encode is disabled
1391 * 0b01 : decode is disabled
1392 */
1393 if (adev->vcn.num_vcn_inst <
1394 AMDGPU_MAX_VCN_INSTANCES) {
1395 adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
1396 ip->revision & 0xc0;
1397 adev->vcn.num_vcn_inst++;
1398 adev->vcn.inst_mask |=
1399 (1U << ip->instance_number);
1400 adev->jpeg.inst_mask |=
1401 (1U << ip->instance_number);
1402 } else {
1403 dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1404 adev->vcn.num_vcn_inst + 1,
1405 AMDGPU_MAX_VCN_INSTANCES);
1406 }
1407 ip->revision &= ~0xc0;
1408 }
1409 if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1410 le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1411 le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1412 le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1413 if (adev->sdma.num_instances <
1414 AMDGPU_MAX_SDMA_INSTANCES) {
1415 adev->sdma.num_instances++;
1416 adev->sdma.sdma_mask |=
1417 (1U << ip->instance_number);
1418 } else {
1419 dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1420 adev->sdma.num_instances + 1,
1421 AMDGPU_MAX_SDMA_INSTANCES);
1422 }
1423 }
1424
1425 if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
1426 if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
1427 adev->vpe.num_instances++;
1428 else
1429 dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
1430 adev->vpe.num_instances + 1,
1431 AMDGPU_MAX_VPE_INSTANCES);
1432 }
1433
1434 if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1435 adev->gmc.num_umc++;
1436 adev->umc.node_inst_num++;
1437 }
1438
1439 if (le16_to_cpu(ip->hw_id) == GC_HWID)
1440 adev->gfx.xcc_mask |=
1441 (1U << ip->instance_number);
1442
1443 if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
1444 wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
1445 ip->revision, 0, 0);
1446
1447 for (k = 0; k < num_base_address; k++) {
1448 /*
1449 * convert the endianness of base addresses in place,
1450 * so that we don't need to convert them when accessing adev->reg_offset.
1451 */
1452 if (ihdr->base_addr_64_bit)
1453 /* Truncate the 64bit base address from ip discovery
1454 * and only store lower 32bit ip base in reg_offset[].
1455 * Bits > 32 follows ASIC specific format, thus just
1456 * discard them and handle it within specific ASIC.
1457 * By this way reg_offset[] and related helpers can
1458 * stay unchanged.
1459 * The base address is in dwords, thus clear the
1460 * highest 2 bits to store.
1461 */
1462 ip->base_address[k] =
1463 lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1464 else
1465 ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1466 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1467 }
1468
1469 for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1470 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1471 hw_id_map[hw_ip] != 0) {
1472 DRM_DEBUG("set register base offset for %s\n",
1473 hw_id_names[le16_to_cpu(ip->hw_id)]);
1474 adev->reg_offset[hw_ip][ip->instance_number] =
1475 ip->base_address;
1476 /* Instance support is somewhat inconsistent.
1477 * SDMA is a good example. Sienna cichlid has 4 total
1478 * SDMA instances, each enumerated separately (HWIDs
1479 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
1480 * but they are enumerated as multiple instances of the
1481 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
1482 * example. On most chips there are multiple instances
1483 * with the same HWID.
1484 */
1485
1486 if (ihdr->version < 3) {
1487 subrev = 0;
1488 variant = 0;
1489 } else {
1490 subrev = ip->sub_revision;
1491 variant = ip->variant;
1492 }
1493
1494 adev->ip_versions[hw_ip]
1495 [ip->instance_number] =
1496 IP_VERSION_FULL(ip->major,
1497 ip->minor,
1498 ip->revision,
1499 variant,
1500 subrev);
1501 }
1502 }
1503
1504 next_ip:
1505 if (ihdr->base_addr_64_bit)
1506 ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1507 else
1508 ip_offset += struct_size(ip, base_address, ip->num_base_address);
1509 }
1510 }
1511
1512 if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
1513 adev->ip_versions[XGMI_HWIP][0] = wafl_ver;
1514
1515 return 0;
1516 }
1517
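/* Apply harvesting: use the per-IP harvest bits on older ASICs, the
 * harvest table otherwise, then fold the result into the VCN/JPEG
 * harvest masks and the UMC count.
 */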
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1519 {
1520 struct ip_discovery_header *ihdr;
1521 struct binary_header *bhdr;
1522 int vcn_harvest_count = 0;
1523 int umc_harvest_count = 0;
1524 uint16_t offset, ihdr_ver;
1525
1526 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1527 offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
1528 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1529 offset);
1530 ihdr_ver = le16_to_cpu(ihdr->version);
	/*
	 * The harvest table does not apply to Navi1x and legacy GPUs,
	 * so read the harvest bit in each IP data structure to set the
	 * harvest configuration.
	 */
1536 if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1537 ihdr_ver <= 2) {
1538 if ((adev->pdev->device == 0x731E &&
1539 (adev->pdev->revision == 0xC6 ||
1540 adev->pdev->revision == 0xC7)) ||
1541 (adev->pdev->device == 0x7340 &&
1542 adev->pdev->revision == 0xC9) ||
1543 (adev->pdev->device == 0x7360 &&
1544 adev->pdev->revision == 0xC7))
1545 amdgpu_discovery_read_harvest_bit_per_ip(adev,
1546 &vcn_harvest_count);
1547 } else {
1548 amdgpu_discovery_read_from_harvest_table(adev,
1549 &vcn_harvest_count,
1550 &umc_harvest_count);
1551 }
1552
1553 amdgpu_discovery_harvest_config_quirk(adev);
1554
1555 if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1556 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1557 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1558 }
1559
1560 if (umc_harvest_count < adev->gmc.num_umc) {
1561 adev->gmc.num_umc -= umc_harvest_count;
1562 }
1563 }
1564
1565 union gc_info {
1566 struct gc_info_v1_0 v1;
1567 struct gc_info_v1_1 v1_1;
1568 struct gc_info_v1_2 v1_2;
1569 struct gc_info_v1_3 v1_3;
1570 struct gc_info_v2_0 v2;
1571 struct gc_info_v2_1 v2_1;
1572 };
1573
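/* Fill adev->gfx.config and adev->gfx.cu_info from the GC info table
 * (v1.x or v2.x layouts).
 */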
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1575 {
1576 struct binary_header *bhdr;
1577 union gc_info *gc_info;
1578 u16 offset;
1579
1580 if (!adev->mman.discovery_bin) {
1581 DRM_ERROR("ip discovery uninitialized\n");
1582 return -EINVAL;
1583 }
1584
1585 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1586 offset = le16_to_cpu(bhdr->table_list[GC].offset);
1587
1588 if (!offset)
1589 return 0;
1590
1591 gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1592
1593 switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1594 case 1:
1595 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1596 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1597 le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1598 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1599 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1600 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1601 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1602 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1603 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1604 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1605 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1606 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1607 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1608 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1609 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1610 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1611 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1612 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1613 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1614 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1615 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1616 adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1617 }
1618 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1619 adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1620 adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1621 adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1622 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1623 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1624 adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1625 adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1626 adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1627 }
1628 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
1629 adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
1630 adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
1631 adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
1632 adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
1633 adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
1634 adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
1635 adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
1636 adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
1637 }
1638 break;
1639 case 2:
1640 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1641 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1642 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1643 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1644 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1645 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1646 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1647 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1648 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1649 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1650 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1651 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1652 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1653 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1654 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1655 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1656 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1657 if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1658 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1659 adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1660 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1661 adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1662 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1663 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1664 adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1665 }
1666 break;
1667 default:
1668 dev_err(adev->dev,
1669 "Unhandled GC info table %d.%d\n",
1670 le16_to_cpu(gc_info->v1.header.version_major),
1671 le16_to_cpu(gc_info->v1.header.version_minor));
1672 return -EINVAL;
1673 }
1674 return 0;
1675 }
1676
1677 union mall_info {
1678 struct mall_info_v1_0 v1;
1679 struct mall_info_v2_0 v2;
1680 };
1681
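/* Compute the total MALL size from the MALL info table, summing the
 * per-UMC contributions (v1) or multiplying the per-UMC size by the
 * UMC count (v2).
 */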
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1683 {
1684 struct binary_header *bhdr;
1685 union mall_info *mall_info;
1686 u32 u, mall_size_per_umc, m_s_present, half_use;
1687 u64 mall_size;
1688 u16 offset;
1689
1690 if (!adev->mman.discovery_bin) {
1691 DRM_ERROR("ip discovery uninitialized\n");
1692 return -EINVAL;
1693 }
1694
1695 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1696 offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1697
1698 if (!offset)
1699 return 0;
1700
1701 mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1702
1703 switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1704 case 1:
1705 mall_size = 0;
1706 mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1707 m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1708 half_use = le32_to_cpu(mall_info->v1.m_half_use);
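/*
 * v1 tables give the MALL size per memory controller; sum it across all
 * UMC instances, doubling controllers flagged in m_s_present and halving
 * controllers flagged for half use.
 */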
1709 for (u = 0; u < adev->gmc.num_umc; u++) {
1710 if (m_s_present & (1 << u))
1711 mall_size += mall_size_per_umc * 2;
1712 else if (half_use & (1 << u))
1713 mall_size += mall_size_per_umc / 2;
1714 else
1715 mall_size += mall_size_per_umc;
1716 }
1717 adev->gmc.mall_size = mall_size;
1718 adev->gmc.m_half_use = half_use;
1719 break;
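/* v2 tables report the per-UMC MALL size directly. */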
1720 case 2:
1721 mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1722 adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
1723 break;
1724 default:
1725 dev_err(adev->dev,
1726 "Unhandled MALL info table %d.%d\n",
1727 le16_to_cpu(mall_info->v1.header.version_major),
1728 le16_to_cpu(mall_info->v1.header.version_minor));
1729 return -EINVAL;
1730 }
1731 return 0;
1732 }
1733
1734 union vcn_info {
1735 struct vcn_info_v1_0 v1;
1736 };
1737
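/*
 * Read the per-instance VCN fuse data (codec disable mask) from the VCN
 * info table, if one is present in the discovery binary.
 */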
1738 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1739 {
1740 struct binary_header *bhdr;
1741 union vcn_info *vcn_info;
1742 u16 offset;
1743 int v;
1744
1745 if (!adev->mman.discovery_bin) {
1746 DRM_ERROR("ip discovery uninitialized\n");
1747 return -EINVAL;
1748 }
1749
1750 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1751 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1752 * but that may change in the future with new GPUs so keep this
1753 * check for defensive purposes.
1754 */
1755 if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1756 dev_err(adev->dev, "invalid vcn instances\n");
1757 return -EINVAL;
1758 }
1759
1760 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1761 offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1762
1763 if (!offset)
1764 return 0;
1765
1766 vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1767
1768 switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1769 case 1:
1770 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1771 * so this won't overflow.
1772 */
1773 for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1774 adev->vcn.inst[v].vcn_codec_disable_mask =
1775 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1776 }
1777 break;
1778 default:
1779 dev_err(adev->dev,
1780 "Unhandled VCN info table %d.%d\n",
1781 le16_to_cpu(vcn_info->v1.header.version_major),
1782 le16_to_cpu(vcn_info->v1.header.version_minor));
1783 return -EINVAL;
1784 }
1785 return 0;
1786 }
1787
1788 union nps_info {
1789 struct nps_info_v1_0 v1;
1790 };
1791
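/*
 * Re-read the NPS info table directly from the discovery TMR in VRAM and
 * validate it against the checksum stored in the binary header. Used by
 * amdgpu_discovery_get_nps_info() when the caller requests a refresh.
 */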
1792 static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
1793 union nps_info *nps_data)
1794 {
1795 uint64_t vram_size, pos, offset;
1796 struct nps_info_header *nhdr;
1797 struct binary_header bhdr;
1798 uint16_t checksum;
1799
1800 vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
1801 pos = vram_size - DISCOVERY_TMR_OFFSET;
1802 amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);
1803
1804 offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
1805 checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
1806
1807 amdgpu_device_vram_access(adev, (pos + offset), nps_data,
1808 sizeof(*nps_data), false);
1809
1810 nhdr = (struct nps_info_header *)(nps_data);
1811 if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
1812 le32_to_cpu(nhdr->size_bytes),
1813 checksum)) {
1814 dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
1815 return -EINVAL;
1816 }
1817
1818 return 0;
1819 }
1820
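/*
 * Query the NPS (NUMA partitioning) configuration from the discovery data.
 * On success, *ranges points to a kvcalloc()'d array of *range_cnt entries
 * which the caller must release with kvfree(); -ENOENT is returned when no
 * valid NPS table is present. Illustrative call pattern (not taken from a
 * specific caller):
 *
 *	struct amdgpu_gmc_memrange *ranges;
 *	uint32_t nps_type;
 *	int count, r;
 *
 *	r = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges, &count, false);
 *	if (!r)
 *		kvfree(ranges);
 */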
1821 int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
1822 uint32_t *nps_type,
1823 struct amdgpu_gmc_memrange **ranges,
1824 int *range_cnt, bool refresh)
1825 {
1826 struct amdgpu_gmc_memrange *mem_ranges;
1827 struct binary_header *bhdr;
1828 union nps_info *nps_info;
1829 union nps_info nps_data;
1830 u16 offset;
1831 int i, r;
1832
1833 if (!nps_type || !range_cnt || !ranges)
1834 return -EINVAL;
1835
1836 if (refresh) {
1837 r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
1838 if (r)
1839 return r;
1840 nps_info = &nps_data;
1841 } else {
1842 if (!adev->mman.discovery_bin) {
1843 dev_err(adev->dev,
1844 "fetch mem range failed, ip discovery uninitialized\n");
1845 return -EINVAL;
1846 }
1847
1848 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1849 offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
1850
1851 if (!offset)
1852 return -ENOENT;
1853
1854 /* If verification fails, return as if NPS table doesn't exist */
1855 if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
1856 return -ENOENT;
1857
1858 nps_info =
1859 (union nps_info *)(adev->mman.discovery_bin + offset);
1860 }
1861
1862 switch (le16_to_cpu(nps_info->v1.header.version_major)) {
1863 case 1:
1864 mem_ranges = kvcalloc(nps_info->v1.count,
1865 sizeof(*mem_ranges),
1866 GFP_KERNEL);
1867 if (!mem_ranges)
1868 return -ENOMEM;
1869 *nps_type = nps_info->v1.nps_type;
1870 *range_cnt = nps_info->v1.count;
1871 for (i = 0; i < *range_cnt; i++) {
1872 mem_ranges[i].base_address =
1873 nps_info->v1.instance_info[i].base_address;
1874 mem_ranges[i].limit_address =
1875 nps_info->v1.instance_info[i].limit_address;
1876 mem_ranges[i].nid_mask = -1;
1877 mem_ranges[i].flags = 0;
1878 }
1879 *ranges = mem_ranges;
1880 break;
1881 default:
1882 dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
1883 le16_to_cpu(nps_info->v1.header.version_major),
1884 le16_to_cpu(nps_info->v1.header.version_minor));
1885 return -EINVAL;
1886 }
1887
1888 return 0;
1889 }
1890
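/*
 * The amdgpu_discovery_set_*_ip_blocks() helpers below select the driver IP
 * blocks to register for each hardware IP, keyed on the IP versions that
 * were discovered (or hardcoded) for this device.
 */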
1891 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1892 {
1893 /* use the GC IP version to pick the common (SoC) IP block */
1894 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1895 case IP_VERSION(9, 0, 1):
1896 case IP_VERSION(9, 1, 0):
1897 case IP_VERSION(9, 2, 1):
1898 case IP_VERSION(9, 2, 2):
1899 case IP_VERSION(9, 3, 0):
1900 case IP_VERSION(9, 4, 0):
1901 case IP_VERSION(9, 4, 1):
1902 case IP_VERSION(9, 4, 2):
1903 case IP_VERSION(9, 4, 3):
1904 case IP_VERSION(9, 4, 4):
1905 case IP_VERSION(9, 5, 0):
1906 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1907 break;
1908 case IP_VERSION(10, 1, 10):
1909 case IP_VERSION(10, 1, 1):
1910 case IP_VERSION(10, 1, 2):
1911 case IP_VERSION(10, 1, 3):
1912 case IP_VERSION(10, 1, 4):
1913 case IP_VERSION(10, 3, 0):
1914 case IP_VERSION(10, 3, 1):
1915 case IP_VERSION(10, 3, 2):
1916 case IP_VERSION(10, 3, 3):
1917 case IP_VERSION(10, 3, 4):
1918 case IP_VERSION(10, 3, 5):
1919 case IP_VERSION(10, 3, 6):
1920 case IP_VERSION(10, 3, 7):
1921 amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1922 break;
1923 case IP_VERSION(11, 0, 0):
1924 case IP_VERSION(11, 0, 1):
1925 case IP_VERSION(11, 0, 2):
1926 case IP_VERSION(11, 0, 3):
1927 case IP_VERSION(11, 0, 4):
1928 case IP_VERSION(11, 5, 0):
1929 case IP_VERSION(11, 5, 1):
1930 case IP_VERSION(11, 5, 2):
1931 case IP_VERSION(11, 5, 3):
1932 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1933 break;
1934 case IP_VERSION(12, 0, 0):
1935 case IP_VERSION(12, 0, 1):
1936 amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
1937 break;
1938 default:
1939 dev_err(adev->dev,
1940 "Failed to add common ip block(GC_HWIP:0x%x)\n",
1941 amdgpu_ip_version(adev, GC_HWIP, 0));
1942 return -EINVAL;
1943 }
1944 return 0;
1945 }
1946
1947 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1948 {
1949 /* use GC or MMHUB IP version */
1950 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1951 case IP_VERSION(9, 0, 1):
1952 case IP_VERSION(9, 1, 0):
1953 case IP_VERSION(9, 2, 1):
1954 case IP_VERSION(9, 2, 2):
1955 case IP_VERSION(9, 3, 0):
1956 case IP_VERSION(9, 4, 0):
1957 case IP_VERSION(9, 4, 1):
1958 case IP_VERSION(9, 4, 2):
1959 case IP_VERSION(9, 4, 3):
1960 case IP_VERSION(9, 4, 4):
1961 case IP_VERSION(9, 5, 0):
1962 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1963 break;
1964 case IP_VERSION(10, 1, 10):
1965 case IP_VERSION(10, 1, 1):
1966 case IP_VERSION(10, 1, 2):
1967 case IP_VERSION(10, 1, 3):
1968 case IP_VERSION(10, 1, 4):
1969 case IP_VERSION(10, 3, 0):
1970 case IP_VERSION(10, 3, 1):
1971 case IP_VERSION(10, 3, 2):
1972 case IP_VERSION(10, 3, 3):
1973 case IP_VERSION(10, 3, 4):
1974 case IP_VERSION(10, 3, 5):
1975 case IP_VERSION(10, 3, 6):
1976 case IP_VERSION(10, 3, 7):
1977 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1978 break;
1979 case IP_VERSION(11, 0, 0):
1980 case IP_VERSION(11, 0, 1):
1981 case IP_VERSION(11, 0, 2):
1982 case IP_VERSION(11, 0, 3):
1983 case IP_VERSION(11, 0, 4):
1984 case IP_VERSION(11, 5, 0):
1985 case IP_VERSION(11, 5, 1):
1986 case IP_VERSION(11, 5, 2):
1987 case IP_VERSION(11, 5, 3):
1988 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1989 break;
1990 case IP_VERSION(12, 0, 0):
1991 case IP_VERSION(12, 0, 1):
1992 amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
1993 break;
1994 default:
1995 dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1996 amdgpu_ip_version(adev, GC_HWIP, 0));
1997 return -EINVAL;
1998 }
1999 return 0;
2000 }
2001
2002 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
2003 {
2004 switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
2005 case IP_VERSION(4, 0, 0):
2006 case IP_VERSION(4, 0, 1):
2007 case IP_VERSION(4, 1, 0):
2008 case IP_VERSION(4, 1, 1):
2009 case IP_VERSION(4, 3, 0):
2010 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
2011 break;
2012 case IP_VERSION(4, 2, 0):
2013 case IP_VERSION(4, 2, 1):
2014 case IP_VERSION(4, 4, 0):
2015 case IP_VERSION(4, 4, 2):
2016 case IP_VERSION(4, 4, 5):
2017 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
2018 break;
2019 case IP_VERSION(5, 0, 0):
2020 case IP_VERSION(5, 0, 1):
2021 case IP_VERSION(5, 0, 2):
2022 case IP_VERSION(5, 0, 3):
2023 case IP_VERSION(5, 2, 0):
2024 case IP_VERSION(5, 2, 1):
2025 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
2026 break;
2027 case IP_VERSION(6, 0, 0):
2028 case IP_VERSION(6, 0, 1):
2029 case IP_VERSION(6, 0, 2):
2030 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
2031 break;
2032 case IP_VERSION(6, 1, 0):
2033 amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
2034 break;
2035 case IP_VERSION(7, 0, 0):
2036 amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
2037 break;
2038 default:
2039 dev_err(adev->dev,
2040 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
2041 amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
2042 return -EINVAL;
2043 }
2044 return 0;
2045 }
2046
2047 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
2048 {
2049 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2050 case IP_VERSION(9, 0, 0):
2051 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
2052 break;
2053 case IP_VERSION(10, 0, 0):
2054 case IP_VERSION(10, 0, 1):
2055 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
2056 break;
2057 case IP_VERSION(11, 0, 0):
2058 case IP_VERSION(11, 0, 2):
2059 case IP_VERSION(11, 0, 4):
2060 case IP_VERSION(11, 0, 5):
2061 case IP_VERSION(11, 0, 9):
2062 case IP_VERSION(11, 0, 7):
2063 case IP_VERSION(11, 0, 11):
2064 case IP_VERSION(11, 0, 12):
2065 case IP_VERSION(11, 0, 13):
2066 case IP_VERSION(11, 5, 0):
2067 case IP_VERSION(11, 5, 2):
2068 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
2069 break;
2070 case IP_VERSION(11, 0, 8):
2071 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
2072 break;
2073 case IP_VERSION(11, 0, 3):
2074 case IP_VERSION(12, 0, 1):
2075 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
2076 break;
2077 case IP_VERSION(13, 0, 0):
2078 case IP_VERSION(13, 0, 1):
2079 case IP_VERSION(13, 0, 2):
2080 case IP_VERSION(13, 0, 3):
2081 case IP_VERSION(13, 0, 5):
2082 case IP_VERSION(13, 0, 6):
2083 case IP_VERSION(13, 0, 7):
2084 case IP_VERSION(13, 0, 8):
2085 case IP_VERSION(13, 0, 10):
2086 case IP_VERSION(13, 0, 11):
2087 case IP_VERSION(13, 0, 12):
2088 case IP_VERSION(13, 0, 14):
2089 case IP_VERSION(14, 0, 0):
2090 case IP_VERSION(14, 0, 1):
2091 case IP_VERSION(14, 0, 4):
2092 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
2093 break;
2094 case IP_VERSION(13, 0, 4):
2095 amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
2096 break;
2097 case IP_VERSION(14, 0, 2):
2098 case IP_VERSION(14, 0, 3):
2099 case IP_VERSION(14, 0, 5):
2100 amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
2101 break;
2102 default:
2103 dev_err(adev->dev,
2104 "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
2105 amdgpu_ip_version(adev, MP0_HWIP, 0));
2106 return -EINVAL;
2107 }
2108 return 0;
2109 }
2110
2111 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
2112 {
2113 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2114 case IP_VERSION(9, 0, 0):
2115 case IP_VERSION(10, 0, 0):
2116 case IP_VERSION(10, 0, 1):
2117 case IP_VERSION(11, 0, 2):
2118 if (adev->asic_type == CHIP_ARCTURUS)
2119 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2120 else
2121 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2122 break;
2123 case IP_VERSION(11, 0, 0):
2124 case IP_VERSION(11, 0, 5):
2125 case IP_VERSION(11, 0, 9):
2126 case IP_VERSION(11, 0, 7):
2127 case IP_VERSION(11, 0, 8):
2128 case IP_VERSION(11, 0, 11):
2129 case IP_VERSION(11, 0, 12):
2130 case IP_VERSION(11, 0, 13):
2131 case IP_VERSION(11, 5, 0):
2132 case IP_VERSION(11, 5, 2):
2133 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2134 break;
2135 case IP_VERSION(12, 0, 0):
2136 case IP_VERSION(12, 0, 1):
2137 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
2138 break;
2139 case IP_VERSION(13, 0, 0):
2140 case IP_VERSION(13, 0, 1):
2141 case IP_VERSION(13, 0, 2):
2142 case IP_VERSION(13, 0, 3):
2143 case IP_VERSION(13, 0, 4):
2144 case IP_VERSION(13, 0, 5):
2145 case IP_VERSION(13, 0, 6):
2146 case IP_VERSION(13, 0, 7):
2147 case IP_VERSION(13, 0, 8):
2148 case IP_VERSION(13, 0, 10):
2149 case IP_VERSION(13, 0, 11):
2150 case IP_VERSION(13, 0, 14):
2151 case IP_VERSION(13, 0, 12):
2152 amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
2153 break;
2154 case IP_VERSION(14, 0, 0):
2155 case IP_VERSION(14, 0, 1):
2156 case IP_VERSION(14, 0, 2):
2157 case IP_VERSION(14, 0, 3):
2158 case IP_VERSION(14, 0, 4):
2159 case IP_VERSION(14, 0, 5):
2160 amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
2161 break;
2162 default:
2163 dev_err(adev->dev,
2164 "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
2165 amdgpu_ip_version(adev, MP1_HWIP, 0));
2166 return -EINVAL;
2167 }
2168 return 0;
2169 }
2170
2171 #if defined(CONFIG_DRM_AMD_DC)
2172 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
2173 {
2174 amdgpu_device_set_sriov_virtual_display(adev);
2175 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2176 }
2177 #endif
2178
2179 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
2180 {
2181 if (adev->enable_virtual_display) {
2182 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2183 return 0;
2184 }
2185
2186 if (!amdgpu_device_has_dc_support(adev))
2187 return 0;
2188
2189 #if defined(CONFIG_DRM_AMD_DC)
2190 if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2191 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2192 case IP_VERSION(1, 0, 0):
2193 case IP_VERSION(1, 0, 1):
2194 case IP_VERSION(2, 0, 2):
2195 case IP_VERSION(2, 0, 0):
2196 case IP_VERSION(2, 0, 3):
2197 case IP_VERSION(2, 1, 0):
2198 case IP_VERSION(3, 0, 0):
2199 case IP_VERSION(3, 0, 2):
2200 case IP_VERSION(3, 0, 3):
2201 case IP_VERSION(3, 0, 1):
2202 case IP_VERSION(3, 1, 2):
2203 case IP_VERSION(3, 1, 3):
2204 case IP_VERSION(3, 1, 4):
2205 case IP_VERSION(3, 1, 5):
2206 case IP_VERSION(3, 1, 6):
2207 case IP_VERSION(3, 2, 0):
2208 case IP_VERSION(3, 2, 1):
2209 case IP_VERSION(3, 5, 0):
2210 case IP_VERSION(3, 5, 1):
2211 case IP_VERSION(3, 6, 0):
2212 case IP_VERSION(4, 1, 0):
2213 /* TODO: Fix IP version. DC code expects version 4.0.1 */
2214 if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
2215 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);
2216
2217 if (amdgpu_sriov_vf(adev))
2218 amdgpu_discovery_set_sriov_display(adev);
2219 else
2220 amdgpu_device_ip_block_add(adev, &dm_ip_block);
2221 break;
2222 default:
2223 dev_err(adev->dev,
2224 "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
2225 amdgpu_ip_version(adev, DCE_HWIP, 0));
2226 return -EINVAL;
2227 }
2228 } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2229 switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2230 case IP_VERSION(12, 0, 0):
2231 case IP_VERSION(12, 0, 1):
2232 case IP_VERSION(12, 1, 0):
2233 if (amdgpu_sriov_vf(adev))
2234 amdgpu_discovery_set_sriov_display(adev);
2235 else
2236 amdgpu_device_ip_block_add(adev, &dm_ip_block);
2237 break;
2238 default:
2239 dev_err(adev->dev,
2240 "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
2241 amdgpu_ip_version(adev, DCI_HWIP, 0));
2242 return -EINVAL;
2243 }
2244 }
2245 #endif
2246 return 0;
2247 }
2248
2249 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
2250 {
2251 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2252 case IP_VERSION(9, 0, 1):
2253 case IP_VERSION(9, 1, 0):
2254 case IP_VERSION(9, 2, 1):
2255 case IP_VERSION(9, 2, 2):
2256 case IP_VERSION(9, 3, 0):
2257 case IP_VERSION(9, 4, 0):
2258 case IP_VERSION(9, 4, 1):
2259 case IP_VERSION(9, 4, 2):
2260 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
2261 break;
2262 case IP_VERSION(9, 4, 3):
2263 case IP_VERSION(9, 4, 4):
2264 case IP_VERSION(9, 5, 0):
2265 amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
2266 break;
2267 case IP_VERSION(10, 1, 10):
2268 case IP_VERSION(10, 1, 2):
2269 case IP_VERSION(10, 1, 1):
2270 case IP_VERSION(10, 1, 3):
2271 case IP_VERSION(10, 1, 4):
2272 case IP_VERSION(10, 3, 0):
2273 case IP_VERSION(10, 3, 2):
2274 case IP_VERSION(10, 3, 1):
2275 case IP_VERSION(10, 3, 4):
2276 case IP_VERSION(10, 3, 5):
2277 case IP_VERSION(10, 3, 6):
2278 case IP_VERSION(10, 3, 3):
2279 case IP_VERSION(10, 3, 7):
2280 amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
2281 break;
2282 case IP_VERSION(11, 0, 0):
2283 case IP_VERSION(11, 0, 1):
2284 case IP_VERSION(11, 0, 2):
2285 case IP_VERSION(11, 0, 3):
2286 case IP_VERSION(11, 0, 4):
2287 case IP_VERSION(11, 5, 0):
2288 case IP_VERSION(11, 5, 1):
2289 case IP_VERSION(11, 5, 2):
2290 case IP_VERSION(11, 5, 3):
2291 amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
2292 break;
2293 case IP_VERSION(12, 0, 0):
2294 case IP_VERSION(12, 0, 1):
2295 amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
2296 break;
2297 default:
2298 dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
2299 amdgpu_ip_version(adev, GC_HWIP, 0));
2300 return -EINVAL;
2301 }
2302 return 0;
2303 }
2304
2305 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
2306 {
2307 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
2308 case IP_VERSION(4, 0, 0):
2309 case IP_VERSION(4, 0, 1):
2310 case IP_VERSION(4, 1, 0):
2311 case IP_VERSION(4, 1, 1):
2312 case IP_VERSION(4, 1, 2):
2313 case IP_VERSION(4, 2, 0):
2314 case IP_VERSION(4, 2, 2):
2315 case IP_VERSION(4, 4, 0):
2316 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
2317 break;
2318 case IP_VERSION(4, 4, 2):
2319 case IP_VERSION(4, 4, 5):
2320 case IP_VERSION(4, 4, 4):
2321 amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
2322 break;
2323 case IP_VERSION(5, 0, 0):
2324 case IP_VERSION(5, 0, 1):
2325 case IP_VERSION(5, 0, 2):
2326 case IP_VERSION(5, 0, 5):
2327 amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2328 break;
2329 case IP_VERSION(5, 2, 0):
2330 case IP_VERSION(5, 2, 2):
2331 case IP_VERSION(5, 2, 4):
2332 case IP_VERSION(5, 2, 5):
2333 case IP_VERSION(5, 2, 6):
2334 case IP_VERSION(5, 2, 3):
2335 case IP_VERSION(5, 2, 1):
2336 case IP_VERSION(5, 2, 7):
2337 amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2338 break;
2339 case IP_VERSION(6, 0, 0):
2340 case IP_VERSION(6, 0, 1):
2341 case IP_VERSION(6, 0, 2):
2342 case IP_VERSION(6, 0, 3):
2343 case IP_VERSION(6, 1, 0):
2344 case IP_VERSION(6, 1, 1):
2345 case IP_VERSION(6, 1, 2):
2346 case IP_VERSION(6, 1, 3):
2347 amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2348 break;
2349 case IP_VERSION(7, 0, 0):
2350 case IP_VERSION(7, 0, 1):
2351 amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
2352 break;
2353 default:
2354 dev_err(adev->dev,
2355 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2356 amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2357 return -EINVAL;
2358 }
2359 return 0;
2360 }
2361
2362 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2363 {
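/*
 * ASICs that expose a VCE block use the legacy UVD/VCE engines;
 * everything else uses VCN (plus JPEG where applicable).
 */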
2364 if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2365 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2366 case IP_VERSION(7, 0, 0):
2367 case IP_VERSION(7, 2, 0):
2368 /* UVD is not supported on vega20 SR-IOV */
2369 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2370 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2371 break;
2372 default:
2373 dev_err(adev->dev,
2374 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2375 amdgpu_ip_version(adev, UVD_HWIP, 0));
2376 return -EINVAL;
2377 }
2378 switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2379 case IP_VERSION(4, 0, 0):
2380 case IP_VERSION(4, 1, 0):
2381 /* VCE is not supported on vega20 SR-IOV */
2382 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2383 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2384 break;
2385 default:
2386 dev_err(adev->dev,
2387 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2388 amdgpu_ip_version(adev, VCE_HWIP, 0));
2389 return -EINVAL;
2390 }
2391 } else {
2392 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2393 case IP_VERSION(1, 0, 0):
2394 case IP_VERSION(1, 0, 1):
2395 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2396 break;
2397 case IP_VERSION(2, 0, 0):
2398 case IP_VERSION(2, 0, 2):
2399 case IP_VERSION(2, 2, 0):
2400 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2401 if (!amdgpu_sriov_vf(adev))
2402 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2403 break;
2404 case IP_VERSION(2, 0, 3):
2405 break;
2406 case IP_VERSION(2, 5, 0):
2407 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2408 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2409 break;
2410 case IP_VERSION(2, 6, 0):
2411 amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2412 amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2413 break;
2414 case IP_VERSION(3, 0, 0):
2415 case IP_VERSION(3, 0, 16):
2416 case IP_VERSION(3, 1, 1):
2417 case IP_VERSION(3, 1, 2):
2418 case IP_VERSION(3, 0, 2):
2419 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2420 if (!amdgpu_sriov_vf(adev))
2421 amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2422 break;
2423 case IP_VERSION(3, 0, 33):
2424 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2425 break;
2426 case IP_VERSION(4, 0, 0):
2427 case IP_VERSION(4, 0, 2):
2428 case IP_VERSION(4, 0, 4):
2429 amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2430 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2431 break;
2432 case IP_VERSION(4, 0, 3):
2433 amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2434 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2435 break;
2436 case IP_VERSION(4, 0, 5):
2437 case IP_VERSION(4, 0, 6):
2438 amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2439 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2440 break;
2441 case IP_VERSION(5, 0, 0):
2442 amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2443 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
2444 break;
2445 case IP_VERSION(5, 0, 1):
2446 amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
2447 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
2448 break;
2449 default:
2450 dev_err(adev->dev,
2451 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2452 amdgpu_ip_version(adev, UVD_HWIP, 0));
2453 return -EINVAL;
2454 }
2455 }
2456 return 0;
2457 }
2458
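/* MES (micro engine scheduler) is only present on GC 11.x and newer parts. */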
2459 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2460 {
2461 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2462 case IP_VERSION(11, 0, 0):
2463 case IP_VERSION(11, 0, 1):
2464 case IP_VERSION(11, 0, 2):
2465 case IP_VERSION(11, 0, 3):
2466 case IP_VERSION(11, 0, 4):
2467 case IP_VERSION(11, 5, 0):
2468 case IP_VERSION(11, 5, 1):
2469 case IP_VERSION(11, 5, 2):
2470 case IP_VERSION(11, 5, 3):
2471 amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2472 adev->enable_mes = true;
2473 adev->enable_mes_kiq = true;
2474 break;
2475 case IP_VERSION(12, 0, 0):
2476 case IP_VERSION(12, 0, 1):
2477 amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
2478 adev->enable_mes = true;
2479 adev->enable_mes_kiq = true;
2480 if (amdgpu_uni_mes)
2481 adev->enable_uni_mes = true;
2482 break;
2483 default:
2484 break;
2485 }
2486 return 0;
2487 }
2488
2489 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2490 {
2491 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2492 case IP_VERSION(9, 4, 3):
2493 case IP_VERSION(9, 4, 4):
2494 case IP_VERSION(9, 5, 0):
2495 aqua_vanjaram_init_soc_config(adev);
2496 break;
2497 default:
2498 break;
2499 }
2500 }
2501
2502 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2503 {
2504 switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2505 case IP_VERSION(6, 1, 0):
2506 case IP_VERSION(6, 1, 1):
2507 case IP_VERSION(6, 1, 3):
2508 amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2509 break;
2510 default:
2511 break;
2512 }
2513
2514 return 0;
2515 }
2516
2517 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2518 {
2519 switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2520 case IP_VERSION(4, 0, 5):
2521 case IP_VERSION(4, 0, 6):
2522 if (amdgpu_umsch_mm & 0x1) {
2523 amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2524 adev->enable_umsch_mm = true;
2525 }
2526 break;
2527 default:
2528 break;
2529 }
2530
2531 return 0;
2532 }
2533
2534 static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
2535 {
2536 #if defined(CONFIG_DRM_AMD_ISP)
2537 switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
2538 case IP_VERSION(4, 1, 0):
2539 amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
2540 break;
2541 case IP_VERSION(4, 1, 1):
2542 amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
2543 break;
2544 default:
2545 break;
2546 }
2547 #endif
2548
2549 return 0;
2550 }
2551
2552 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2553 {
2554 int r;
2555
2556 switch (adev->asic_type) {
2557 case CHIP_VEGA10:
2558 case CHIP_VEGA12:
2559 case CHIP_RAVEN:
2560 case CHIP_VEGA20:
2561 case CHIP_ARCTURUS:
2562 case CHIP_ALDEBARAN:
2563 /* This is not fatal. We have a fallback below
2564 * if the new firmware is not present. Some of
2565 * this will be overridden below to keep things
2566 * consistent with the current behavior.
2567 */
2568 r = amdgpu_discovery_reg_base_init(adev);
2569 if (!r) {
2570 amdgpu_discovery_harvest_ip(adev);
2571 amdgpu_discovery_get_gfx_info(adev);
2572 amdgpu_discovery_get_mall_info(adev);
2573 amdgpu_discovery_get_vcn_info(adev);
2574 }
2575 break;
2576 default:
2577 r = amdgpu_discovery_reg_base_init(adev);
2578 if (r) {
2579 drm_err(&adev->ddev, "discovery failed: %d\n", r);
2580 return r;
2581 }
2582
2583 amdgpu_discovery_harvest_ip(adev);
2584 amdgpu_discovery_get_gfx_info(adev);
2585 amdgpu_discovery_get_mall_info(adev);
2586 amdgpu_discovery_get_vcn_info(adev);
2587 break;
2588 }
2589
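/*
 * Chips without a (complete) IP discovery table get their IP versions and
 * instance counts hardcoded here, overriding anything read above, so the
 * rest of the driver can rely on IP-version-based code paths.
 */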
2590 switch (adev->asic_type) {
2591 case CHIP_VEGA10:
2592 vega10_reg_base_init(adev);
2593 adev->sdma.num_instances = 2;
2594 adev->gmc.num_umc = 4;
2595 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2596 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2597 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2598 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2599 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2600 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2601 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2602 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2603 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2604 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2605 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2606 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2607 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2608 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2609 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2610 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2611 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2612 break;
2613 case CHIP_VEGA12:
2614 vega10_reg_base_init(adev);
2615 adev->sdma.num_instances = 2;
2616 adev->gmc.num_umc = 4;
2617 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2618 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2619 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2620 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2621 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2622 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2623 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2624 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2625 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2626 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2627 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2628 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2629 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2630 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2631 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2632 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2633 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2634 break;
2635 case CHIP_RAVEN:
2636 vega10_reg_base_init(adev);
2637 adev->sdma.num_instances = 1;
2638 adev->vcn.num_vcn_inst = 1;
2639 adev->gmc.num_umc = 2;
2640 if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2641 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2642 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2643 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2644 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2645 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2646 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2647 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2648 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2649 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2650 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2651 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2652 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2653 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2654 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2655 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2656 adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
2657 } else {
2658 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2659 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2660 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2661 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2662 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2663 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2664 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2665 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2666 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2667 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2668 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2669 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2670 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2671 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2672 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2673 adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
2674 }
2675 break;
2676 case CHIP_VEGA20:
2677 vega20_reg_base_init(adev);
2678 adev->sdma.num_instances = 2;
2679 adev->gmc.num_umc = 8;
2680 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2681 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2682 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2683 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2684 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2685 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2686 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2687 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2688 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2689 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2690 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2691 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2692 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2693 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2694 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2695 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2696 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2697 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2698 break;
2699 case CHIP_ARCTURUS:
2700 arct_reg_base_init(adev);
2701 adev->sdma.num_instances = 8;
2702 adev->vcn.num_vcn_inst = 2;
2703 adev->gmc.num_umc = 8;
2704 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2705 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2706 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2707 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2708 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2709 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2710 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2711 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2712 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2713 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2714 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2715 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2716 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2717 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2718 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2719 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2720 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2721 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2722 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2723 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2724 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2725 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2726 break;
2727 case CHIP_ALDEBARAN:
2728 aldebaran_reg_base_init(adev);
2729 adev->sdma.num_instances = 5;
2730 adev->vcn.num_vcn_inst = 2;
2731 adev->gmc.num_umc = 4;
2732 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2733 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2734 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2735 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2736 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2737 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2738 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2739 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2740 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2741 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2742 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2743 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2744 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2745 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2746 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2747 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2748 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2749 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2750 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2751 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2752 break;
2753 default:
2754 break;
2755 }
2756
2757 amdgpu_discovery_init_soc_config(adev);
2758 amdgpu_discovery_sysfs_init(adev);
2759
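/* derive the driver family from the GC IP version */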
2760 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2761 case IP_VERSION(9, 0, 1):
2762 case IP_VERSION(9, 2, 1):
2763 case IP_VERSION(9, 4, 0):
2764 case IP_VERSION(9, 4, 1):
2765 case IP_VERSION(9, 4, 2):
2766 case IP_VERSION(9, 4, 3):
2767 case IP_VERSION(9, 4, 4):
2768 case IP_VERSION(9, 5, 0):
2769 adev->family = AMDGPU_FAMILY_AI;
2770 break;
2771 case IP_VERSION(9, 1, 0):
2772 case IP_VERSION(9, 2, 2):
2773 case IP_VERSION(9, 3, 0):
2774 adev->family = AMDGPU_FAMILY_RV;
2775 break;
2776 case IP_VERSION(10, 1, 10):
2777 case IP_VERSION(10, 1, 1):
2778 case IP_VERSION(10, 1, 2):
2779 case IP_VERSION(10, 1, 3):
2780 case IP_VERSION(10, 1, 4):
2781 case IP_VERSION(10, 3, 0):
2782 case IP_VERSION(10, 3, 2):
2783 case IP_VERSION(10, 3, 4):
2784 case IP_VERSION(10, 3, 5):
2785 adev->family = AMDGPU_FAMILY_NV;
2786 break;
2787 case IP_VERSION(10, 3, 1):
2788 adev->family = AMDGPU_FAMILY_VGH;
2789 adev->apu_flags |= AMD_APU_IS_VANGOGH;
2790 break;
2791 case IP_VERSION(10, 3, 3):
2792 adev->family = AMDGPU_FAMILY_YC;
2793 break;
2794 case IP_VERSION(10, 3, 6):
2795 adev->family = AMDGPU_FAMILY_GC_10_3_6;
2796 break;
2797 case IP_VERSION(10, 3, 7):
2798 adev->family = AMDGPU_FAMILY_GC_10_3_7;
2799 break;
2800 case IP_VERSION(11, 0, 0):
2801 case IP_VERSION(11, 0, 2):
2802 case IP_VERSION(11, 0, 3):
2803 adev->family = AMDGPU_FAMILY_GC_11_0_0;
2804 break;
2805 case IP_VERSION(11, 0, 1):
2806 case IP_VERSION(11, 0, 4):
2807 adev->family = AMDGPU_FAMILY_GC_11_0_1;
2808 break;
2809 case IP_VERSION(11, 5, 0):
2810 case IP_VERSION(11, 5, 1):
2811 case IP_VERSION(11, 5, 2):
2812 case IP_VERSION(11, 5, 3):
2813 adev->family = AMDGPU_FAMILY_GC_11_5_0;
2814 break;
2815 case IP_VERSION(12, 0, 0):
2816 case IP_VERSION(12, 0, 1):
2817 adev->family = AMDGPU_FAMILY_GC_12_0_0;
2818 break;
2819 default:
2820 return -EINVAL;
2821 }
2822
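/* GC versions that correspond to APUs; flag the device accordingly */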
2823 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2824 case IP_VERSION(9, 1, 0):
2825 case IP_VERSION(9, 2, 2):
2826 case IP_VERSION(9, 3, 0):
2827 case IP_VERSION(10, 1, 3):
2828 case IP_VERSION(10, 1, 4):
2829 case IP_VERSION(10, 3, 1):
2830 case IP_VERSION(10, 3, 3):
2831 case IP_VERSION(10, 3, 6):
2832 case IP_VERSION(10, 3, 7):
2833 case IP_VERSION(11, 0, 1):
2834 case IP_VERSION(11, 0, 4):
2835 case IP_VERSION(11, 5, 0):
2836 case IP_VERSION(11, 5, 1):
2837 case IP_VERSION(11, 5, 2):
2838 case IP_VERSION(11, 5, 3):
2839 adev->flags |= AMD_IS_APU;
2840 break;
2841 default:
2842 break;
2843 }
2844
2845 /* set NBIO version */
2846 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2847 case IP_VERSION(6, 1, 0):
2848 case IP_VERSION(6, 2, 0):
2849 adev->nbio.funcs = &nbio_v6_1_funcs;
2850 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2851 break;
2852 case IP_VERSION(7, 0, 0):
2853 case IP_VERSION(7, 0, 1):
2854 case IP_VERSION(2, 5, 0):
2855 adev->nbio.funcs = &nbio_v7_0_funcs;
2856 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2857 break;
2858 case IP_VERSION(7, 4, 0):
2859 case IP_VERSION(7, 4, 1):
2860 case IP_VERSION(7, 4, 4):
2861 adev->nbio.funcs = &nbio_v7_4_funcs;
2862 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2863 break;
2864 case IP_VERSION(7, 9, 0):
2865 case IP_VERSION(7, 9, 1):
2866 adev->nbio.funcs = &nbio_v7_9_funcs;
2867 adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2868 break;
2869 case IP_VERSION(7, 11, 0):
2870 case IP_VERSION(7, 11, 1):
2871 case IP_VERSION(7, 11, 2):
2872 case IP_VERSION(7, 11, 3):
2873 adev->nbio.funcs = &nbio_v7_11_funcs;
2874 adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2875 break;
2876 case IP_VERSION(7, 2, 0):
2877 case IP_VERSION(7, 2, 1):
2878 case IP_VERSION(7, 3, 0):
2879 case IP_VERSION(7, 5, 0):
2880 case IP_VERSION(7, 5, 1):
2881 adev->nbio.funcs = &nbio_v7_2_funcs;
2882 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2883 break;
2884 case IP_VERSION(2, 1, 1):
2885 case IP_VERSION(2, 3, 0):
2886 case IP_VERSION(2, 3, 1):
2887 case IP_VERSION(2, 3, 2):
2888 case IP_VERSION(3, 3, 0):
2889 case IP_VERSION(3, 3, 1):
2890 case IP_VERSION(3, 3, 2):
2891 case IP_VERSION(3, 3, 3):
2892 adev->nbio.funcs = &nbio_v2_3_funcs;
2893 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2894 break;
2895 case IP_VERSION(4, 3, 0):
2896 case IP_VERSION(4, 3, 1):
2897 if (amdgpu_sriov_vf(adev))
2898 adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2899 else
2900 adev->nbio.funcs = &nbio_v4_3_funcs;
2901 adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2902 break;
2903 case IP_VERSION(7, 7, 0):
2904 case IP_VERSION(7, 7, 1):
2905 adev->nbio.funcs = &nbio_v7_7_funcs;
2906 adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2907 break;
2908 case IP_VERSION(6, 3, 1):
2909 adev->nbio.funcs = &nbif_v6_3_1_funcs;
2910 adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
2911 break;
2912 default:
2913 break;
2914 }
2915
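/* set HDP callbacks */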
2916 switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2917 case IP_VERSION(4, 0, 0):
2918 case IP_VERSION(4, 0, 1):
2919 case IP_VERSION(4, 1, 0):
2920 case IP_VERSION(4, 1, 1):
2921 case IP_VERSION(4, 1, 2):
2922 case IP_VERSION(4, 2, 0):
2923 case IP_VERSION(4, 2, 1):
2924 case IP_VERSION(4, 4, 0):
2925 case IP_VERSION(4, 4, 2):
2926 case IP_VERSION(4, 4, 5):
2927 adev->hdp.funcs = &hdp_v4_0_funcs;
2928 break;
2929 case IP_VERSION(5, 0, 0):
2930 case IP_VERSION(5, 0, 1):
2931 case IP_VERSION(5, 0, 2):
2932 case IP_VERSION(5, 0, 3):
2933 case IP_VERSION(5, 0, 4):
2934 case IP_VERSION(5, 2, 0):
2935 adev->hdp.funcs = &hdp_v5_0_funcs;
2936 break;
2937 case IP_VERSION(5, 2, 1):
2938 adev->hdp.funcs = &hdp_v5_2_funcs;
2939 break;
2940 case IP_VERSION(6, 0, 0):
2941 case IP_VERSION(6, 0, 1):
2942 case IP_VERSION(6, 1, 0):
2943 adev->hdp.funcs = &hdp_v6_0_funcs;
2944 break;
2945 case IP_VERSION(7, 0, 0):
2946 adev->hdp.funcs = &hdp_v7_0_funcs;
2947 break;
2948 default:
2949 break;
2950 }
2951
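/* set DF (data fabric) callbacks */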
2952 switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
2953 case IP_VERSION(3, 6, 0):
2954 case IP_VERSION(3, 6, 1):
2955 case IP_VERSION(3, 6, 2):
2956 adev->df.funcs = &df_v3_6_funcs;
2957 break;
2958 case IP_VERSION(2, 1, 0):
2959 case IP_VERSION(2, 1, 1):
2960 case IP_VERSION(2, 5, 0):
2961 case IP_VERSION(3, 5, 1):
2962 case IP_VERSION(3, 5, 2):
2963 adev->df.funcs = &df_v1_7_funcs;
2964 break;
2965 case IP_VERSION(4, 3, 0):
2966 adev->df.funcs = &df_v4_3_funcs;
2967 break;
2968 case IP_VERSION(4, 6, 2):
2969 adev->df.funcs = &df_v4_6_2_funcs;
2970 break;
2971 case IP_VERSION(4, 15, 0):
2972 case IP_VERSION(4, 15, 1):
2973 adev->df.funcs = &df_v4_15_funcs;
2974 break;
2975 default:
2976 break;
2977 }
2978
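/* set SMUIO callbacks */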
2979 switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
2980 case IP_VERSION(9, 0, 0):
2981 case IP_VERSION(9, 0, 1):
2982 case IP_VERSION(10, 0, 0):
2983 case IP_VERSION(10, 0, 1):
2984 case IP_VERSION(10, 0, 2):
2985 adev->smuio.funcs = &smuio_v9_0_funcs;
2986 break;
2987 case IP_VERSION(11, 0, 0):
2988 case IP_VERSION(11, 0, 2):
2989 case IP_VERSION(11, 0, 3):
2990 case IP_VERSION(11, 0, 4):
2991 case IP_VERSION(11, 0, 7):
2992 case IP_VERSION(11, 0, 8):
2993 adev->smuio.funcs = &smuio_v11_0_funcs;
2994 break;
2995 case IP_VERSION(11, 0, 6):
2996 case IP_VERSION(11, 0, 10):
2997 case IP_VERSION(11, 0, 11):
2998 case IP_VERSION(11, 5, 0):
2999 case IP_VERSION(11, 5, 2):
3000 case IP_VERSION(13, 0, 1):
3001 case IP_VERSION(13, 0, 9):
3002 case IP_VERSION(13, 0, 10):
3003 adev->smuio.funcs = &smuio_v11_0_6_funcs;
3004 break;
3005 case IP_VERSION(13, 0, 2):
3006 adev->smuio.funcs = &smuio_v13_0_funcs;
3007 break;
3008 case IP_VERSION(13, 0, 3):
3009 case IP_VERSION(13, 0, 11):
3010 adev->smuio.funcs = &smuio_v13_0_3_funcs;
3011 if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
3012 adev->flags |= AMD_IS_APU;

3014 break;
3015 case IP_VERSION(13, 0, 6):
3016 case IP_VERSION(13, 0, 8):
3017 case IP_VERSION(14, 0, 0):
3018 case IP_VERSION(14, 0, 1):
3019 adev->smuio.funcs = &smuio_v13_0_6_funcs;
3020 break;
3021 case IP_VERSION(14, 0, 2):
3022 adev->smuio.funcs = &smuio_v14_0_2_funcs;
3023 break;
3024 default:
3025 break;
3026 }
3027
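/* set LSDMA callbacks */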
3028 switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
3029 case IP_VERSION(6, 0, 0):
3030 case IP_VERSION(6, 0, 1):
3031 case IP_VERSION(6, 0, 2):
3032 case IP_VERSION(6, 0, 3):
3033 adev->lsdma.funcs = &lsdma_v6_0_funcs;
3034 break;
3035 case IP_VERSION(7, 0, 0):
3036 case IP_VERSION(7, 0, 1):
3037 adev->lsdma.funcs = &lsdma_v7_0_funcs;
3038 break;
3039 default:
3040 break;
3041 }
3042
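/*
 * Register the IP blocks. The order in which blocks are added here is the
 * order in which they will be initialized.
 */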
3043 r = amdgpu_discovery_set_common_ip_blocks(adev);
3044 if (r)
3045 return r;
3046
3047 r = amdgpu_discovery_set_gmc_ip_blocks(adev);
3048 if (r)
3049 return r;
3050
3051 /* For SR-IOV, PSP needs to be initialized before IH */
3052 if (amdgpu_sriov_vf(adev)) {
3053 r = amdgpu_discovery_set_psp_ip_blocks(adev);
3054 if (r)
3055 return r;
3056 r = amdgpu_discovery_set_ih_ip_blocks(adev);
3057 if (r)
3058 return r;
3059 } else {
3060 r = amdgpu_discovery_set_ih_ip_blocks(adev);
3061 if (r)
3062 return r;
3063
3064 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
3065 r = amdgpu_discovery_set_psp_ip_blocks(adev);
3066 if (r)
3067 return r;
3068 }
3069 }
3070
3071 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
3072 r = amdgpu_discovery_set_smu_ip_blocks(adev);
3073 if (r)
3074 return r;
3075 }
3076
3077 r = amdgpu_discovery_set_display_ip_blocks(adev);
3078 if (r)
3079 return r;
3080
3081 r = amdgpu_discovery_set_gc_ip_blocks(adev);
3082 if (r)
3083 return r;
3084
3085 r = amdgpu_discovery_set_sdma_ip_blocks(adev);
3086 if (r)
3087 return r;
3088
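/*
 * With direct firmware loading on bare metal, or RLC backdoor auto loading
 * with dpm enabled, the SMU block is added here, after GC and SDMA, rather
 * than with the PSP-load path above.
 */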
3089 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
3090 !amdgpu_sriov_vf(adev)) ||
3091 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
3092 r = amdgpu_discovery_set_smu_ip_blocks(adev);
3093 if (r)
3094 return r;
3095 }
3096
3097 r = amdgpu_discovery_set_mm_ip_blocks(adev);
3098 if (r)
3099 return r;
3100
3101 r = amdgpu_discovery_set_mes_ip_blocks(adev);
3102 if (r)
3103 return r;
3104
3105 r = amdgpu_discovery_set_vpe_ip_blocks(adev);
3106 if (r)
3107 return r;
3108
3109 r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
3110 if (r)
3111 return r;
3112
3113 r = amdgpu_discovery_set_isp_ip_blocks(adev);
3114 if (r)
3115 return r;
3116 return 0;
3117 }
3118
3119