1 /*
2 * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/firmware.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30 #include "amdgpu_ras.h"
31
32 #include "soc15.h"
33 #include "gfx_v9_0.h"
34 #include "gfx_v9_4_3.h"
35 #include "gmc_v9_0.h"
36 #include "df_v1_7.h"
37 #include "df_v3_6.h"
38 #include "df_v4_3.h"
39 #include "df_v4_6_2.h"
40 #include "df_v4_15.h"
41 #include "nbio_v6_1.h"
42 #include "nbio_v7_0.h"
43 #include "nbio_v7_4.h"
44 #include "nbio_v7_9.h"
45 #include "nbio_v7_11.h"
46 #include "hdp_v4_0.h"
47 #include "vega10_ih.h"
48 #include "vega20_ih.h"
49 #include "sdma_v4_0.h"
50 #include "sdma_v4_4_2.h"
51 #include "uvd_v7_0.h"
52 #include "vce_v4_0.h"
53 #include "vcn_v1_0.h"
54 #include "vcn_v2_5.h"
55 #include "jpeg_v2_5.h"
56 #include "smuio_v9_0.h"
57 #include "gmc_v10_0.h"
58 #include "gmc_v11_0.h"
59 #include "gmc_v12_0.h"
60 #include "gfxhub_v2_0.h"
61 #include "mmhub_v2_0.h"
62 #include "nbio_v2_3.h"
63 #include "nbio_v4_3.h"
64 #include "nbio_v7_2.h"
65 #include "nbio_v7_7.h"
66 #include "nbif_v6_3_1.h"
67 #include "hdp_v5_0.h"
68 #include "hdp_v5_2.h"
69 #include "hdp_v6_0.h"
70 #include "hdp_v7_0.h"
71 #include "nv.h"
72 #include "soc21.h"
73 #include "soc24.h"
74 #include "navi10_ih.h"
75 #include "ih_v6_0.h"
76 #include "ih_v6_1.h"
77 #include "ih_v7_0.h"
78 #include "gfx_v10_0.h"
79 #include "gfx_v11_0.h"
80 #include "gfx_v12_0.h"
81 #include "sdma_v5_0.h"
82 #include "sdma_v5_2.h"
83 #include "sdma_v6_0.h"
84 #include "sdma_v7_0.h"
85 #include "lsdma_v6_0.h"
86 #include "lsdma_v7_0.h"
87 #include "vcn_v2_0.h"
88 #include "jpeg_v2_0.h"
89 #include "vcn_v3_0.h"
90 #include "jpeg_v3_0.h"
91 #include "vcn_v4_0.h"
92 #include "jpeg_v4_0.h"
93 #include "vcn_v4_0_3.h"
94 #include "jpeg_v4_0_3.h"
95 #include "vcn_v4_0_5.h"
96 #include "jpeg_v4_0_5.h"
97 #include "amdgpu_vkms.h"
98 #include "mes_v11_0.h"
99 #include "mes_v12_0.h"
100 #include "smuio_v11_0.h"
101 #include "smuio_v11_0_6.h"
102 #include "smuio_v13_0.h"
103 #include "smuio_v13_0_3.h"
104 #include "smuio_v13_0_6.h"
105 #include "smuio_v14_0_2.h"
106 #include "vcn_v5_0_0.h"
107 #include "vcn_v5_0_1.h"
108 #include "jpeg_v5_0_0.h"
109 #include "jpeg_v5_0_1.h"
110
111 #include "amdgpu_vpe.h"
112 #if defined(CONFIG_DRM_AMD_ISP)
113 #include "amdgpu_isp.h"
114 #endif
115
116 MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
117 MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
118 MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
119 MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
120 MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
121 MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
122 MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
123 MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
124 MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
125
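/* Fixed register offsets used while parsing IP discovery, before
 * adev->reg_offset[] has been populated from the discovery table.
 */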
126 #define mmIP_DISCOVERY_VERSION 0x16A00
127 #define mmRCC_CONFIG_MEMSIZE 0xde3
128 #define mmMP0_SMN_C2PMSG_33 0x16061
129 #define mmMM_INDEX 0x0
130 #define mmMM_INDEX_HI 0x6
131 #define mmMM_DATA 0x1
132
133 static const char *hw_id_names[HW_ID_MAX] = {
134 [MP1_HWID] = "MP1",
135 [MP2_HWID] = "MP2",
136 [THM_HWID] = "THM",
137 [SMUIO_HWID] = "SMUIO",
138 [FUSE_HWID] = "FUSE",
139 [CLKA_HWID] = "CLKA",
140 [PWR_HWID] = "PWR",
141 [GC_HWID] = "GC",
142 [UVD_HWID] = "UVD",
143 [AUDIO_AZ_HWID] = "AUDIO_AZ",
144 [ACP_HWID] = "ACP",
145 [DCI_HWID] = "DCI",
146 [DMU_HWID] = "DMU",
147 [DCO_HWID] = "DCO",
148 [DIO_HWID] = "DIO",
149 [XDMA_HWID] = "XDMA",
150 [DCEAZ_HWID] = "DCEAZ",
151 [DAZ_HWID] = "DAZ",
152 [SDPMUX_HWID] = "SDPMUX",
153 [NTB_HWID] = "NTB",
154 [IOHC_HWID] = "IOHC",
155 [L2IMU_HWID] = "L2IMU",
156 [VCE_HWID] = "VCE",
157 [MMHUB_HWID] = "MMHUB",
158 [ATHUB_HWID] = "ATHUB",
159 [DBGU_NBIO_HWID] = "DBGU_NBIO",
160 [DFX_HWID] = "DFX",
161 [DBGU0_HWID] = "DBGU0",
162 [DBGU1_HWID] = "DBGU1",
163 [OSSSYS_HWID] = "OSSSYS",
164 [HDP_HWID] = "HDP",
165 [SDMA0_HWID] = "SDMA0",
166 [SDMA1_HWID] = "SDMA1",
167 [SDMA2_HWID] = "SDMA2",
168 [SDMA3_HWID] = "SDMA3",
169 [LSDMA_HWID] = "LSDMA",
170 [ISP_HWID] = "ISP",
171 [DBGU_IO_HWID] = "DBGU_IO",
172 [DF_HWID] = "DF",
173 [CLKB_HWID] = "CLKB",
174 [FCH_HWID] = "FCH",
175 [DFX_DAP_HWID] = "DFX_DAP",
176 [L1IMU_PCIE_HWID] = "L1IMU_PCIE",
177 [L1IMU_NBIF_HWID] = "L1IMU_NBIF",
178 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
179 [L1IMU3_HWID] = "L1IMU3",
180 [L1IMU4_HWID] = "L1IMU4",
181 [L1IMU5_HWID] = "L1IMU5",
182 [L1IMU6_HWID] = "L1IMU6",
183 [L1IMU7_HWID] = "L1IMU7",
184 [L1IMU8_HWID] = "L1IMU8",
185 [L1IMU9_HWID] = "L1IMU9",
186 [L1IMU10_HWID] = "L1IMU10",
187 [L1IMU11_HWID] = "L1IMU11",
188 [L1IMU12_HWID] = "L1IMU12",
189 [L1IMU13_HWID] = "L1IMU13",
190 [L1IMU14_HWID] = "L1IMU14",
191 [L1IMU15_HWID] = "L1IMU15",
192 [WAFLC_HWID] = "WAFLC",
193 [FCH_USB_PD_HWID] = "FCH_USB_PD",
194 [PCIE_HWID] = "PCIE",
195 [PCS_HWID] = "PCS",
196 [DDCL_HWID] = "DDCL",
197 [SST_HWID] = "SST",
198 [IOAGR_HWID] = "IOAGR",
199 [NBIF_HWID] = "NBIF",
200 [IOAPIC_HWID] = "IOAPIC",
201 [SYSTEMHUB_HWID] = "SYSTEMHUB",
202 [NTBCCP_HWID] = "NTBCCP",
203 [UMC_HWID] = "UMC",
204 [SATA_HWID] = "SATA",
205 [USB_HWID] = "USB",
206 [CCXSEC_HWID] = "CCXSEC",
207 [XGMI_HWID] = "XGMI",
208 [XGBE_HWID] = "XGBE",
209 [MP0_HWID] = "MP0",
210 [VPE_HWID] = "VPE",
211 };
212
213 static int hw_id_map[MAX_HWIP] = {
214 [GC_HWIP] = GC_HWID,
215 [HDP_HWIP] = HDP_HWID,
216 [SDMA0_HWIP] = SDMA0_HWID,
217 [SDMA1_HWIP] = SDMA1_HWID,
218 [SDMA2_HWIP] = SDMA2_HWID,
219 [SDMA3_HWIP] = SDMA3_HWID,
220 [LSDMA_HWIP] = LSDMA_HWID,
221 [MMHUB_HWIP] = MMHUB_HWID,
222 [ATHUB_HWIP] = ATHUB_HWID,
223 [NBIO_HWIP] = NBIF_HWID,
224 [MP0_HWIP] = MP0_HWID,
225 [MP1_HWIP] = MP1_HWID,
226 [UVD_HWIP] = UVD_HWID,
227 [VCE_HWIP] = VCE_HWID,
228 [DF_HWIP] = DF_HWID,
229 [DCE_HWIP] = DMU_HWID,
230 [OSSSYS_HWIP] = OSSSYS_HWID,
231 [SMUIO_HWIP] = SMUIO_HWID,
232 [PWR_HWIP] = PWR_HWID,
233 [NBIF_HWIP] = NBIF_HWID,
234 [THM_HWIP] = THM_HWID,
235 [CLK_HWIP] = CLKA_HWID,
236 [UMC_HWIP] = UMC_HWID,
237 [XGMI_HWIP] = XGMI_HWID,
238 [DCI_HWIP] = DCI_HWID,
239 [PCIE_HWIP] = PCIE_HWID,
240 [VPE_HWIP] = VPE_HWID,
241 [ISP_HWIP] = ISP_HWID,
242 };
243
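/* Copy the discovery binary out of the ACPI-reported TMR region in system
 * memory; used when the binary cannot be read out of VRAM.
 */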
244 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
245 {
246 u64 tmr_offset, tmr_size, pos;
247 void *discv_regn;
248 int ret;
249
250 ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
251 if (ret)
252 return ret;
253
254 pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
255
256 /* This region is read-only and reserved from system use */
257 discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
258 if (discv_regn) {
259 memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
260 memunmap(discv_regn);
261 return 0;
262 }
263
264 return -ENOENT;
265 }
266
267 #define IP_DISCOVERY_V2 2
268 #define IP_DISCOVERY_V4 4
269
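/* Read the discovery binary from the top of VRAM. On bare metal, wait for
 * IFWI init to complete first (bit 31 of C2PMSG_33). If the reported VRAM
 * size is invalid, fall back to the system memory copy.
 */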
270 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
271 uint8_t *binary)
272 {
273 bool sz_valid = true;
274 uint64_t vram_size;
275 int i, ret = 0;
276 u32 msg;
277
278 if (!amdgpu_sriov_vf(adev)) {
279 /* It can take up to two seconds for IFWI init to complete on some dGPUs,
280 * but generally it should be in the 60-100ms range. Normally this starts
281 * as soon as the device gets power so by the time the OS loads this has long
282 * completed. However, when a card is hotplugged via e.g., USB4, we need to
283 * wait for this to complete. Once the C2PMSG is updated, we can
284 * continue.
285 */
286
287 for (i = 0; i < 2000; i++) {
288 msg = RREG32(mmMP0_SMN_C2PMSG_33);
289 if (msg & 0x80000000)
290 break;
291 msleep(1);
292 }
293 }
294
295 vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
296 if (!vram_size || vram_size == U32_MAX)
297 sz_valid = false;
298 else
299 vram_size <<= 20;
300
301 if (sz_valid) {
302 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
303 amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
304 adev->mman.discovery_tmr_size, false);
305 } else {
306 ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
307 }
308
309 if (ret)
310 dev_err(adev->dev,
311 "failed to read discovery info from memory, vram size read: %llx",
312 vram_size);
313
314 return ret;
315 }
316
317 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
318 uint8_t *binary,
319 const char *fw_name)
320 {
321 const struct firmware *fw;
322 int r;
323
324 r = firmware_request_nowarn(&fw, fw_name, adev->dev);
325 if (r) {
326 if (amdgpu_discovery == 2)
327 dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
328 else
329 drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
330 return r;
331 }
332
333 memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
334 release_firmware(fw);
335
336 return 0;
337 }
338
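/* Discovery tables are protected by a simple byte-wise sum checksum. */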
339 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
340 {
341 uint16_t checksum = 0;
342 int i;
343
344 for (i = 0; i < size; i++)
345 checksum += data[i];
346
347 return checksum;
348 }
349
350 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
351 uint16_t expected)
352 {
353 return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
354 }
355
356 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
357 {
358 struct binary_header *bhdr;
359 bhdr = (struct binary_header *)binary;
360
361 return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
362 }
363
364 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
365 {
366 /*
367 * So far, apply this quirk only on those Navy Flounder boards which
368 * have a bad harvest table entry for the VCN config.
369 */
370 if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
371 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
372 switch (adev->pdev->revision) {
373 case 0xC1:
374 case 0xC2:
375 case 0xC3:
376 case 0xC5:
377 case 0xC7:
378 case 0xCF:
379 case 0xDF:
380 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
381 adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
382 break;
383 default:
384 break;
385 }
386 }
387 }
388
389 static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
390 struct binary_header *bhdr)
391 {
392 struct table_info *info;
393 uint16_t checksum;
394 uint16_t offset;
395
396 info = &bhdr->table_list[NPS_INFO];
397 offset = le16_to_cpu(info->offset);
398 checksum = le16_to_cpu(info->checksum);
399
400 struct nps_info_header *nhdr =
401 (struct nps_info_header *)(adev->mman.discovery_bin + offset);
402
403 if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
404 dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
405 return -EINVAL;
406 }
407
408 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
409 le32_to_cpu(nhdr->size_bytes),
410 checksum)) {
411 dev_dbg(adev->dev, "invalid nps info data table checksum\n");
412 return -EINVAL;
413 }
414
415 return 0;
416 }
417
418 static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
419 {
420 if (amdgpu_discovery == 2)
421 return "amdgpu/ip_discovery.bin";
422
423 switch (adev->asic_type) {
424 case CHIP_VEGA10:
425 return "amdgpu/vega10_ip_discovery.bin";
426 case CHIP_VEGA12:
427 return "amdgpu/vega12_ip_discovery.bin";
428 case CHIP_RAVEN:
429 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
430 return "amdgpu/raven2_ip_discovery.bin";
431 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
432 return "amdgpu/picasso_ip_discovery.bin";
433 else
434 return "amdgpu/raven_ip_discovery.bin";
435 case CHIP_VEGA20:
436 return "amdgpu/vega20_ip_discovery.bin";
437 case CHIP_ARCTURUS:
438 return "amdgpu/arcturus_ip_discovery.bin";
439 case CHIP_ALDEBARAN:
440 return "amdgpu/aldebaran_ip_discovery.bin";
441 default:
442 return NULL;
443 }
444 }
445
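/* Read the discovery binary (from a firmware file when one is selected,
 * otherwise from memory) and validate the binary signature and the
 * checksums of the individual info tables.
 */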
446 static int amdgpu_discovery_init(struct amdgpu_device *adev)
447 {
448 struct table_info *info;
449 struct binary_header *bhdr;
450 const char *fw_name;
451 uint16_t offset;
452 uint16_t size;
453 uint16_t checksum;
454 int r;
455
456 adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
457 adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
458 if (!adev->mman.discovery_bin)
459 return -ENOMEM;
460
461 /* Read from file if it is the preferred option */
462 fw_name = amdgpu_discovery_get_fw_name(adev);
463 if (fw_name != NULL) {
464 drm_dbg(&adev->ddev, "use ip discovery information from file");
465 r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
466 if (r)
467 goto out;
468 } else {
469 drm_dbg(&adev->ddev, "use ip discovery information from memory");
470 r = amdgpu_discovery_read_binary_from_mem(
471 adev, adev->mman.discovery_bin);
472 if (r)
473 goto out;
474 }
475
476 /* check the ip discovery binary signature */
477 if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
478 dev_err(adev->dev,
479 "get invalid ip discovery binary signature\n");
480 r = -EINVAL;
481 goto out;
482 }
483
484 bhdr = (struct binary_header *)adev->mman.discovery_bin;
485
486 offset = offsetof(struct binary_header, binary_checksum) +
487 sizeof(bhdr->binary_checksum);
488 size = le16_to_cpu(bhdr->binary_size) - offset;
489 checksum = le16_to_cpu(bhdr->binary_checksum);
490
491 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
492 size, checksum)) {
493 dev_err(adev->dev, "invalid ip discovery binary checksum\n");
494 r = -EINVAL;
495 goto out;
496 }
497
498 info = &bhdr->table_list[IP_DISCOVERY];
499 offset = le16_to_cpu(info->offset);
500 checksum = le16_to_cpu(info->checksum);
501
502 if (offset) {
503 struct ip_discovery_header *ihdr =
504 (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
505 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
506 dev_err(adev->dev, "invalid ip discovery data table signature\n");
507 r = -EINVAL;
508 goto out;
509 }
510
511 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
512 le16_to_cpu(ihdr->size), checksum)) {
513 dev_err(adev->dev, "invalid ip discovery data table checksum\n");
514 r = -EINVAL;
515 goto out;
516 }
517 }
518
519 info = &bhdr->table_list[GC];
520 offset = le16_to_cpu(info->offset);
521 checksum = le16_to_cpu(info->checksum);
522
523 if (offset) {
524 struct gpu_info_header *ghdr =
525 (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
526
527 if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
528 dev_err(adev->dev, "invalid ip discovery gc table id\n");
529 r = -EINVAL;
530 goto out;
531 }
532
533 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
534 le32_to_cpu(ghdr->size), checksum)) {
535 dev_err(adev->dev, "invalid gc data table checksum\n");
536 r = -EINVAL;
537 goto out;
538 }
539 }
540
541 info = &bhdr->table_list[HARVEST_INFO];
542 offset = le16_to_cpu(info->offset);
543 checksum = le16_to_cpu(info->checksum);
544
545 if (offset) {
546 struct harvest_info_header *hhdr =
547 (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
548
549 if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
550 dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
551 r = -EINVAL;
552 goto out;
553 }
554
555 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
556 sizeof(struct harvest_table), checksum)) {
557 dev_err(adev->dev, "invalid harvest data table checksum\n");
558 r = -EINVAL;
559 goto out;
560 }
561 }
562
563 info = &bhdr->table_list[VCN_INFO];
564 offset = le16_to_cpu(info->offset);
565 checksum = le16_to_cpu(info->checksum);
566
567 if (offset) {
568 struct vcn_info_header *vhdr =
569 (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
570
571 if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
572 dev_err(adev->dev, "invalid ip discovery vcn table id\n");
573 r = -EINVAL;
574 goto out;
575 }
576
577 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
578 le32_to_cpu(vhdr->size_bytes), checksum)) {
579 dev_err(adev->dev, "invalid vcn data table checksum\n");
580 r = -EINVAL;
581 goto out;
582 }
583 }
584
585 info = &bhdr->table_list[MALL_INFO];
586 offset = le16_to_cpu(info->offset);
587 checksum = le16_to_cpu(info->checksum);
588
589 if (offset) {
590 struct mall_info_header *mhdr =
591 (struct mall_info_header *)(adev->mman.discovery_bin + offset);
592
593 if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
594 dev_err(adev->dev, "invalid ip discovery mall table id\n");
595 r = -EINVAL;
596 goto out;
597 }
598
599 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
600 le32_to_cpu(mhdr->size_bytes), checksum)) {
601 dev_err(adev->dev, "invalid mall data table checksum\n");
602 r = -EINVAL;
603 goto out;
604 }
605 }
606
607 return 0;
608
609 out:
610 kfree(adev->mman.discovery_bin);
611 adev->mman.discovery_bin = NULL;
612 if ((amdgpu_discovery != 2) &&
613 (RREG32(mmIP_DISCOVERY_VERSION) == 4))
614 amdgpu_ras_query_boot_status(adev, 4);
615 return r;
616 }
617
618 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
619
620 void amdgpu_discovery_fini(struct amdgpu_device *adev)
621 {
622 amdgpu_discovery_sysfs_fini(adev);
623 kfree(adev->mman.discovery_bin);
624 adev->mman.discovery_bin = NULL;
625 }
626
627 static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
628 uint8_t instance, uint16_t hw_id)
629 {
630 if (instance >= HWIP_MAX_INSTANCE) {
631 dev_err(adev->dev,
632 "Unexpected instance_number (%d) from ip discovery blob\n",
633 instance);
634 return -EINVAL;
635 }
636 if (hw_id >= HW_ID_MAX) {
637 dev_err(adev->dev,
638 "Unexpected hw_id (%d) from ip discovery blob\n",
639 hw_id);
640 return -EINVAL;
641 }
642
643 return 0;
644 }
645
646 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
647 uint32_t *vcn_harvest_count)
648 {
649 struct binary_header *bhdr;
650 struct ip_discovery_header *ihdr;
651 struct die_header *dhdr;
652 struct ip *ip;
653 uint16_t die_offset, ip_offset, num_dies, num_ips;
654 uint16_t hw_id;
655 uint8_t inst;
656 int i, j;
657
658 bhdr = (struct binary_header *)adev->mman.discovery_bin;
659 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
660 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
661 num_dies = le16_to_cpu(ihdr->num_dies);
662
663 /* scan harvest bit of all IP data structures */
664 for (i = 0; i < num_dies; i++) {
665 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
666 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
667 num_ips = le16_to_cpu(dhdr->num_ips);
668 ip_offset = die_offset + sizeof(*dhdr);
669
670 for (j = 0; j < num_ips; j++) {
671 ip = (struct ip *)(adev->mman.discovery_bin +
672 ip_offset);
673 inst = ip->number_instance;
674 hw_id = le16_to_cpu(ip->hw_id);
675 if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
676 goto next_ip;
677
678 if (ip->harvest == 1) {
679 switch (hw_id) {
680 case VCN_HWID:
681 (*vcn_harvest_count)++;
682 if (inst == 0) {
683 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
684 adev->vcn.inst_mask &=
685 ~AMDGPU_VCN_HARVEST_VCN0;
686 adev->jpeg.inst_mask &=
687 ~AMDGPU_VCN_HARVEST_VCN0;
688 } else {
689 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
690 adev->vcn.inst_mask &=
691 ~AMDGPU_VCN_HARVEST_VCN1;
692 adev->jpeg.inst_mask &=
693 ~AMDGPU_VCN_HARVEST_VCN1;
694 }
695 break;
696 case DMU_HWID:
697 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
698 break;
699 default:
700 break;
701 }
702 }
703 next_ip:
704 ip_offset += struct_size(ip, base_address,
705 ip->num_base_address);
706 }
707 }
708 }
709
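/* Newer discovery binaries carry a dedicated harvest table; translate its
 * entries into the per-block harvest configs and instance masks.
 */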
710 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
711 uint32_t *vcn_harvest_count,
712 uint32_t *umc_harvest_count)
713 {
714 struct binary_header *bhdr;
715 struct harvest_table *harvest_info;
716 u16 offset;
717 int i;
718 uint32_t umc_harvest_config = 0;
719
720 bhdr = (struct binary_header *)adev->mman.discovery_bin;
721 offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
722
723 if (!offset) {
724 dev_err(adev->dev, "invalid harvest table offset\n");
725 return;
726 }
727
728 harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
729
730 for (i = 0; i < 32; i++) {
731 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
732 break;
733
734 switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
735 case VCN_HWID:
736 (*vcn_harvest_count)++;
737 adev->vcn.harvest_config |=
738 (1 << harvest_info->list[i].number_instance);
739 adev->jpeg.harvest_config |=
740 (1 << harvest_info->list[i].number_instance);
741
742 adev->vcn.inst_mask &=
743 ~(1U << harvest_info->list[i].number_instance);
744 adev->jpeg.inst_mask &=
745 ~(1U << harvest_info->list[i].number_instance);
746 break;
747 case DMU_HWID:
748 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
749 break;
750 case UMC_HWID:
751 umc_harvest_config |=
752 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
753 (*umc_harvest_count)++;
754 break;
755 case GC_HWID:
756 adev->gfx.xcc_mask &=
757 ~(1U << harvest_info->list[i].number_instance);
758 break;
759 case SDMA0_HWID:
760 adev->sdma.sdma_mask &=
761 ~(1U << harvest_info->list[i].number_instance);
762 break;
763 #if defined(CONFIG_DRM_AMD_ISP)
764 case ISP_HWID:
765 adev->isp.harvest_config |=
766 ~(1U << harvest_info->list[i].number_instance);
767 break;
768 #endif
769 default:
770 break;
771 }
772 }
773
774 adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
775 ~umc_harvest_config;
776 }
777
778 /* ================================================== */
779
780 struct ip_hw_instance {
781 struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
782
783 int hw_id;
784 u8 num_instance;
785 u8 major, minor, revision;
786 u8 harvest;
787
788 int num_base_addresses;
789 u32 base_addr[] __counted_by(num_base_addresses);
790 };
791
792 struct ip_hw_id {
793 struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
794 int hw_id;
795 };
796
797 struct ip_die_entry {
798 struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
799 u16 num_ips;
800 };
801
802 /* -------------------------------------------------- */
803
804 struct ip_hw_instance_attr {
805 struct attribute attr;
806 ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
807 };
808
809 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
810 {
811 return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
812 }
813
814 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
815 {
816 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
817 }
818
819 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
820 {
821 return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
822 }
823
824 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
825 {
826 return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
827 }
828
829 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
830 {
831 return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
832 }
833
834 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
835 {
836 return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
837 }
838
839 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
840 {
841 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
842 }
843
844 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
845 {
846 ssize_t res, at;
847 int ii;
848
849 for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
850 /* Here we satisfy the condition that at + size <= PAGE_SIZE.
851 */
852 if (at + 12 > PAGE_SIZE)
853 break;
854 res = sysfs_emit_at(buf, at, "0x%08X\n",
855 ip_hw_instance->base_addr[ii]);
856 if (res <= 0)
857 break;
858 at += res;
859 }
860
861 return res < 0 ? res : at;
862 }
863
864 static struct ip_hw_instance_attr ip_hw_attr[] = {
865 __ATTR_RO(hw_id),
866 __ATTR_RO(num_instance),
867 __ATTR_RO(major),
868 __ATTR_RO(minor),
869 __ATTR_RO(revision),
870 __ATTR_RO(harvest),
871 __ATTR_RO(num_base_addresses),
872 __ATTR_RO(base_addr),
873 };
874
875 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
876 ATTRIBUTE_GROUPS(ip_hw_instance);
877
878 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
879 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
880
881 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
882 struct attribute *attr,
883 char *buf)
884 {
885 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
886 struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
887
888 if (!ip_hw_attr->show)
889 return -EIO;
890
891 return ip_hw_attr->show(ip_hw_instance, buf);
892 }
893
894 static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
895 .show = ip_hw_instance_attr_show,
896 };
897
898 static void ip_hw_instance_release(struct kobject *kobj)
899 {
900 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
901
902 kfree(ip_hw_instance);
903 }
904
905 static const struct kobj_type ip_hw_instance_ktype = {
906 .release = ip_hw_instance_release,
907 .sysfs_ops = &ip_hw_instance_sysfs_ops,
908 .default_groups = ip_hw_instance_groups,
909 };
910
911 /* -------------------------------------------------- */
912
913 #define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
914
915 static void ip_hw_id_release(struct kobject *kobj)
916 {
917 struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
918
919 if (!list_empty(&ip_hw_id->hw_id_kset.list))
920 DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
921 kfree(ip_hw_id);
922 }
923
924 static const struct kobj_type ip_hw_id_ktype = {
925 .release = ip_hw_id_release,
926 .sysfs_ops = &kobj_sysfs_ops,
927 };
928
929 /* -------------------------------------------------- */
930
931 static void die_kobj_release(struct kobject *kobj);
932 static void ip_disc_release(struct kobject *kobj);
933
934 struct ip_die_entry_attribute {
935 struct attribute attr;
936 ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
937 };
938
939 #define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
940
941 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
942 {
943 return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
944 }
945
946 /* If there are more ip_die_entry attrs, other than the number of IPs,
947 * we can make this into an array of attrs, and then initialize
948 * ip_die_entry_attrs in a loop.
949 */
950 static struct ip_die_entry_attribute num_ips_attr =
951 __ATTR_RO(num_ips);
952
953 static struct attribute *ip_die_entry_attrs[] = {
954 &num_ips_attr.attr,
955 NULL,
956 };
957 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
958
959 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
960
961 static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
962 struct attribute *attr,
963 char *buf)
964 {
965 struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
966 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
967
968 if (!ip_die_entry_attr->show)
969 return -EIO;
970
971 return ip_die_entry_attr->show(ip_die_entry, buf);
972 }
973
974 static void ip_die_entry_release(struct kobject *kobj)
975 {
976 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
977
978 if (!list_empty(&ip_die_entry->ip_kset.list))
979 DRM_ERROR("ip_die_entry->ip_kset is not empty");
980 kfree(ip_die_entry);
981 }
982
983 static const struct sysfs_ops ip_die_entry_sysfs_ops = {
984 .show = ip_die_entry_attr_show,
985 };
986
987 static const struct kobj_type ip_die_entry_ktype = {
988 .release = ip_die_entry_release,
989 .sysfs_ops = &ip_die_entry_sysfs_ops,
990 .default_groups = ip_die_entry_groups,
991 };
992
993 static const struct kobj_type die_kobj_ktype = {
994 .release = die_kobj_release,
995 .sysfs_ops = &kobj_sysfs_ops,
996 };
997
998 static const struct kobj_type ip_discovery_ktype = {
999 .release = ip_disc_release,
1000 .sysfs_ops = &kobj_sysfs_ops,
1001 };
1002
1003 struct ip_discovery_top {
1004 struct kobject kobj; /* ip_discovery/ */
1005 struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
1006 struct amdgpu_device *adev;
1007 };
1008
1009 static void die_kobj_release(struct kobject *kobj)
1010 {
1011 struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
1012 struct ip_discovery_top,
1013 die_kset);
1014 if (!list_empty(&ip_top->die_kset.list))
1015 DRM_ERROR("ip_top->die_kset is not empty");
1016 }
1017
1018 static void ip_disc_release(struct kobject *kobj)
1019 {
1020 struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
1021 kobj);
1022 struct amdgpu_device *adev = ip_top->adev;
1023
1024 adev->ip_top = NULL;
1025 kfree(ip_top);
1026 }
1027
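/* Derive the harvest status of a single IP instance from the masks
 * populated during register base init and harvest parsing.
 */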
1028 static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
1029 uint16_t hw_id, uint8_t inst)
1030 {
1031 uint8_t harvest = 0;
1032
1033 /* Until a uniform way is figured out, get mask based on hwid */
1034 switch (hw_id) {
1035 case VCN_HWID:
1036 /* VCN vs UVD+VCE */
1037 if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
1038 harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
1039 break;
1040 case DMU_HWID:
1041 if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
1042 harvest = 0x1;
1043 break;
1044 case UMC_HWID:
1045 /* TODO: This needs additional parsing; ignore for now. */
1046 break;
1047 case GC_HWID:
1048 harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
1049 break;
1050 case SDMA0_HWID:
1051 harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
1052 break;
1053 default:
1054 break;
1055 }
1056
1057 return harvest;
1058 }
1059
1060 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
1061 struct ip_die_entry *ip_die_entry,
1062 const size_t _ip_offset, const int num_ips,
1063 bool reg_base_64)
1064 {
1065 int ii, jj, kk, res;
1066 uint16_t hw_id;
1067 uint8_t inst;
1068
1069 DRM_DEBUG("num_ips:%d", num_ips);
1070
1071 /* Find all IPs of a given HW ID, and add their instance to
1072 * #die/#hw_id/#instance/<attributes>
1073 */
1074 for (ii = 0; ii < HW_ID_MAX; ii++) {
1075 struct ip_hw_id *ip_hw_id = NULL;
1076 size_t ip_offset = _ip_offset;
1077
1078 for (jj = 0; jj < num_ips; jj++) {
1079 struct ip_v4 *ip;
1080 struct ip_hw_instance *ip_hw_instance;
1081
1082 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1083 inst = ip->instance_number;
1084 hw_id = le16_to_cpu(ip->hw_id);
1085 if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
1086 hw_id != ii)
1087 goto next_ip;
1088
1089 DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
1090
1091 /* We have a hw_id match; register the hw
1092 * block if not yet registered.
1093 */
1094 if (!ip_hw_id) {
1095 ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
1096 if (!ip_hw_id)
1097 return -ENOMEM;
1098 ip_hw_id->hw_id = ii;
1099
1100 kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
1101 ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
1102 ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
1103 res = kset_register(&ip_hw_id->hw_id_kset);
1104 if (res) {
1105 DRM_ERROR("Couldn't register ip_hw_id kset");
1106 kfree(ip_hw_id);
1107 return res;
1108 }
1109 if (hw_id_names[ii]) {
1110 res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
1111 &ip_hw_id->hw_id_kset.kobj,
1112 hw_id_names[ii]);
1113 if (res) {
1114 DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
1115 hw_id_names[ii],
1116 kobject_name(&ip_die_entry->ip_kset.kobj));
1117 }
1118 }
1119 }
1120
1121 /* Now register its instance.
1122 */
1123 ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
1124 base_addr,
1125 ip->num_base_address),
1126 GFP_KERNEL);
1127 if (!ip_hw_instance) {
1128 DRM_ERROR("no memory for ip_hw_instance");
1129 return -ENOMEM;
1130 }
1131 ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1132 ip_hw_instance->num_instance = ip->instance_number;
1133 ip_hw_instance->major = ip->major;
1134 ip_hw_instance->minor = ip->minor;
1135 ip_hw_instance->revision = ip->revision;
1136 ip_hw_instance->harvest =
1137 amdgpu_discovery_get_harvest_info(
1138 adev, ip_hw_instance->hw_id,
1139 ip_hw_instance->num_instance);
1140 ip_hw_instance->num_base_addresses = ip->num_base_address;
1141
1142 for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1143 if (reg_base_64)
1144 ip_hw_instance->base_addr[kk] =
1145 lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1146 else
1147 ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1148 }
1149
1150 kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1151 ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1152 res = kobject_add(&ip_hw_instance->kobj, NULL,
1153 "%d", ip_hw_instance->num_instance);
1154 next_ip:
1155 if (reg_base_64)
1156 ip_offset += struct_size(ip, base_address_64,
1157 ip->num_base_address);
1158 else
1159 ip_offset += struct_size(ip, base_address,
1160 ip->num_base_address);
1161 }
1162 }
1163
1164 return 0;
1165 }
1166
1167 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1168 {
1169 struct binary_header *bhdr;
1170 struct ip_discovery_header *ihdr;
1171 struct die_header *dhdr;
1172 struct kset *die_kset = &adev->ip_top->die_kset;
1173 u16 num_dies, die_offset, num_ips;
1174 size_t ip_offset;
1175 int ii, res;
1176
1177 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1178 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1179 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1180 num_dies = le16_to_cpu(ihdr->num_dies);
1181
1182 DRM_DEBUG("number of dies: %d\n", num_dies);
1183
1184 for (ii = 0; ii < num_dies; ii++) {
1185 struct ip_die_entry *ip_die_entry;
1186
1187 die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1188 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1189 num_ips = le16_to_cpu(dhdr->num_ips);
1190 ip_offset = die_offset + sizeof(*dhdr);
1191
1192 /* Add the die to the kset.
1193 *
1194 * dhdr->die_id == ii, which was checked in
1195 * amdgpu_discovery_reg_base_init().
1196 */
1197
1198 ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1199 if (!ip_die_entry)
1200 return -ENOMEM;
1201
1202 ip_die_entry->num_ips = num_ips;
1203
1204 kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1205 ip_die_entry->ip_kset.kobj.kset = die_kset;
1206 ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1207 res = kset_register(&ip_die_entry->ip_kset);
1208 if (res) {
1209 DRM_ERROR("Couldn't register ip_die_entry kset");
1210 kfree(ip_die_entry);
1211 return res;
1212 }
1213
1214 amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1215 }
1216
1217 return 0;
1218 }
1219
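/* Create the ip_discovery/ sysfs hierarchy (die/#die/#hw_id/#instance)
 * under the device and populate it from the discovery binary.
 */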
1220 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1221 {
1222 struct kset *die_kset;
1223 int res, ii;
1224
1225 if (!adev->mman.discovery_bin)
1226 return -EINVAL;
1227
1228 adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1229 if (!adev->ip_top)
1230 return -ENOMEM;
1231
1232 adev->ip_top->adev = adev;
1233
1234 res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1235 &adev->dev->kobj, "ip_discovery");
1236 if (res) {
1237 DRM_ERROR("Couldn't init and add ip_discovery/");
1238 goto Err;
1239 }
1240
1241 die_kset = &adev->ip_top->die_kset;
1242 kobject_set_name(&die_kset->kobj, "%s", "die");
1243 die_kset->kobj.parent = &adev->ip_top->kobj;
1244 die_kset->kobj.ktype = &die_kobj_ktype;
1245 res = kset_register(&adev->ip_top->die_kset);
1246 if (res) {
1247 DRM_ERROR("Couldn't register die_kset");
1248 goto Err;
1249 }
1250
1251 for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1252 ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1253 ip_hw_instance_attrs[ii] = NULL;
1254
1255 res = amdgpu_discovery_sysfs_recurse(adev);
1256
1257 return res;
1258 Err:
1259 kobject_put(&adev->ip_top->kobj);
1260 return res;
1261 }
1262
1263 /* -------------------------------------------------- */
1264
1265 #define list_to_kobj(el) container_of(el, struct kobject, entry)
1266
1267 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1268 {
1269 struct list_head *el, *tmp;
1270 struct kset *hw_id_kset;
1271
1272 hw_id_kset = &ip_hw_id->hw_id_kset;
1273 spin_lock(&hw_id_kset->list_lock);
1274 list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1275 list_del_init(el);
1276 spin_unlock(&hw_id_kset->list_lock);
1277 /* kobject is embedded in ip_hw_instance */
1278 kobject_put(list_to_kobj(el));
1279 spin_lock(&hw_id_kset->list_lock);
1280 }
1281 spin_unlock(&hw_id_kset->list_lock);
1282 kobject_put(&ip_hw_id->hw_id_kset.kobj);
1283 }
1284
1285 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1286 {
1287 struct list_head *el, *tmp;
1288 struct kset *ip_kset;
1289
1290 ip_kset = &ip_die_entry->ip_kset;
1291 spin_lock(&ip_kset->list_lock);
1292 list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1293 list_del_init(el);
1294 spin_unlock(&ip_kset->list_lock);
1295 amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1296 spin_lock(&ip_kset->list_lock);
1297 }
1298 spin_unlock(&ip_kset->list_lock);
1299 kobject_put(&ip_die_entry->ip_kset.kobj);
1300 }
1301
1302 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1303 {
1304 struct list_head *el, *tmp;
1305 struct kset *die_kset;
1306
1307 die_kset = &adev->ip_top->die_kset;
1308 spin_lock(&die_kset->list_lock);
1309 list_for_each_prev_safe(el, tmp, &die_kset->list) {
1310 list_del_init(el);
1311 spin_unlock(&die_kset->list_lock);
1312 amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1313 spin_lock(&die_kset->list_lock);
1314 }
1315 spin_unlock(&die_kset->list_lock);
1316 kobject_put(&adev->ip_top->die_kset.kobj);
1317 kobject_put(&adev->ip_top->kobj);
1318 }
1319
1320 /* ================================================== */
1321
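/* Walk every die and IP instance in the discovery binary and populate
 * adev->reg_offset[], adev->ip_versions[] and the per-block instance
 * masks and counts (VCN, JPEG, SDMA, VPE, UMC, GC).
 */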
1322 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1323 {
1324 uint8_t num_base_address, subrev, variant;
1325 struct binary_header *bhdr;
1326 struct ip_discovery_header *ihdr;
1327 struct die_header *dhdr;
1328 struct ip_v4 *ip;
1329 uint16_t die_offset;
1330 uint16_t ip_offset;
1331 uint16_t num_dies;
1332 uint32_t wafl_ver;
1333 uint16_t num_ips;
1334 uint16_t hw_id;
1335 uint8_t inst;
1336 int hw_ip;
1337 int i, j, k;
1338 int r;
1339
1340 r = amdgpu_discovery_init(adev);
1341 if (r)
1342 return r;
1343
1344 wafl_ver = 0;
1345 adev->gfx.xcc_mask = 0;
1346 adev->sdma.sdma_mask = 0;
1347 adev->vcn.inst_mask = 0;
1348 adev->jpeg.inst_mask = 0;
1349 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1350 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1351 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1352 num_dies = le16_to_cpu(ihdr->num_dies);
1353
1354 DRM_DEBUG("number of dies: %d\n", num_dies);
1355
1356 for (i = 0; i < num_dies; i++) {
1357 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1358 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1359 num_ips = le16_to_cpu(dhdr->num_ips);
1360 ip_offset = die_offset + sizeof(*dhdr);
1361
1362 if (le16_to_cpu(dhdr->die_id) != i) {
1363 DRM_ERROR("invalid die id %d, expected %d\n",
1364 le16_to_cpu(dhdr->die_id), i);
1365 return -EINVAL;
1366 }
1367
1368 DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1369 le16_to_cpu(dhdr->die_id), num_ips);
1370
1371 for (j = 0; j < num_ips; j++) {
1372 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1373
1374 inst = ip->instance_number;
1375 hw_id = le16_to_cpu(ip->hw_id);
1376 if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
1377 goto next_ip;
1378
1379 num_base_address = ip->num_base_address;
1380
1381 DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1382 hw_id_names[le16_to_cpu(ip->hw_id)],
1383 le16_to_cpu(ip->hw_id),
1384 ip->instance_number,
1385 ip->major, ip->minor,
1386 ip->revision);
1387
1388 if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1389 /* Bit [5:0]: original revision value
1390 * Bit [7:6]: en/decode capability:
1391 * 0b00 : VCN functions normally
1392 * 0b10 : encode is disabled
1393 * 0b01 : decode is disabled
1394 */
1395 if (adev->vcn.num_vcn_inst <
1396 AMDGPU_MAX_VCN_INSTANCES) {
1397 adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
1398 ip->revision & 0xc0;
1399 adev->vcn.num_vcn_inst++;
1400 adev->vcn.inst_mask |=
1401 (1U << ip->instance_number);
1402 adev->jpeg.inst_mask |=
1403 (1U << ip->instance_number);
1404 } else {
1405 dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1406 adev->vcn.num_vcn_inst + 1,
1407 AMDGPU_MAX_VCN_INSTANCES);
1408 }
1409 ip->revision &= ~0xc0;
1410 }
1411 if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1412 le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1413 le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1414 le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1415 if (adev->sdma.num_instances <
1416 AMDGPU_MAX_SDMA_INSTANCES) {
1417 adev->sdma.num_instances++;
1418 adev->sdma.sdma_mask |=
1419 (1U << ip->instance_number);
1420 } else {
1421 dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1422 adev->sdma.num_instances + 1,
1423 AMDGPU_MAX_SDMA_INSTANCES);
1424 }
1425 }
1426
1427 if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
1428 if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
1429 adev->vpe.num_instances++;
1430 else
1431 dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
1432 adev->vpe.num_instances + 1,
1433 AMDGPU_MAX_VPE_INSTANCES);
1434 }
1435
1436 if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1437 adev->gmc.num_umc++;
1438 adev->umc.node_inst_num++;
1439 }
1440
1441 if (le16_to_cpu(ip->hw_id) == GC_HWID)
1442 adev->gfx.xcc_mask |=
1443 (1U << ip->instance_number);
1444
1445 if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
1446 wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
1447 ip->revision, 0, 0);
1448
1449 for (k = 0; k < num_base_address; k++) {
1450 /*
1451 * convert the endianness of base addresses in place,
1452 * so that we don't need to convert them when accessing adev->reg_offset.
1453 */
1454 if (ihdr->base_addr_64_bit)
1455 /* Truncate the 64bit base address from ip discovery
1456 * and only store lower 32bit ip base in reg_offset[].
1457 * Bits > 32 follow an ASIC-specific format, thus just
1458 * discard them and handle it within specific ASIC.
1459 * This way reg_offset[] and related helpers can
1460 * stay unchanged.
1461 * The base address is in dwords, thus clear the
1462 * highest 2 bits to store.
1463 */
1464 ip->base_address[k] =
1465 lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1466 else
1467 ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1468 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1469 }
1470
1471 for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1472 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1473 hw_id_map[hw_ip] != 0) {
1474 DRM_DEBUG("set register base offset for %s\n",
1475 hw_id_names[le16_to_cpu(ip->hw_id)]);
1476 adev->reg_offset[hw_ip][ip->instance_number] =
1477 ip->base_address;
1478 /* Instance support is somewhat inconsistent.
1479 * SDMA is a good example. Sienna cichlid has 4 total
1480 * SDMA instances, each enumerated separately (HWIDs
1481 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
1482 * but they are enumerated as multiple instances of the
1483 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
1484 * example. On most chips there are multiple instances
1485 * with the same HWID.
1486 */
1487
1488 if (ihdr->version < 3) {
1489 subrev = 0;
1490 variant = 0;
1491 } else {
1492 subrev = ip->sub_revision;
1493 variant = ip->variant;
1494 }
1495
1496 adev->ip_versions[hw_ip]
1497 [ip->instance_number] =
1498 IP_VERSION_FULL(ip->major,
1499 ip->minor,
1500 ip->revision,
1501 variant,
1502 subrev);
1503 }
1504 }
1505
1506 next_ip:
1507 if (ihdr->base_addr_64_bit)
1508 ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1509 else
1510 ip_offset += struct_size(ip, base_address, ip->num_base_address);
1511 }
1512 }
1513
1514 if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
1515 adev->ip_versions[XGMI_HWIP][0] = wafl_ver;
1516
1517 return 0;
1518 }
1519
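/* Determine the harvest configuration, either from the per-IP harvest
 * bits (older binaries) or from the dedicated harvest table.
 */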
1520 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1521 {
1522 struct ip_discovery_header *ihdr;
1523 struct binary_header *bhdr;
1524 int vcn_harvest_count = 0;
1525 int umc_harvest_count = 0;
1526 uint16_t offset, ihdr_ver;
1527
1528 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1529 offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
1530 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1531 offset);
1532 ihdr_ver = le16_to_cpu(ihdr->version);
1533 /*
1534 * Harvest table does not fit Navi1x and legacy GPUs,
1535 * so read harvest bit per IP data structure to set
1536 * harvest configuration.
1537 */
1538 if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1539 ihdr_ver <= 2) {
1540 if ((adev->pdev->device == 0x731E &&
1541 (adev->pdev->revision == 0xC6 ||
1542 adev->pdev->revision == 0xC7)) ||
1543 (adev->pdev->device == 0x7340 &&
1544 adev->pdev->revision == 0xC9) ||
1545 (adev->pdev->device == 0x7360 &&
1546 adev->pdev->revision == 0xC7))
1547 amdgpu_discovery_read_harvest_bit_per_ip(adev,
1548 &vcn_harvest_count);
1549 } else {
1550 amdgpu_discovery_read_from_harvest_table(adev,
1551 &vcn_harvest_count,
1552 &umc_harvest_count);
1553 }
1554
1555 amdgpu_discovery_harvest_config_quirk(adev);
1556
1557 if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1558 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1559 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1560 }
1561
1562 if (umc_harvest_count < adev->gmc.num_umc) {
1563 adev->gmc.num_umc -= umc_harvest_count;
1564 }
1565 }
1566
1567 union gc_info {
1568 struct gc_info_v1_0 v1;
1569 struct gc_info_v1_1 v1_1;
1570 struct gc_info_v1_2 v1_2;
1571 struct gc_info_v1_3 v1_3;
1572 struct gc_info_v2_0 v2;
1573 struct gc_info_v2_1 v2_1;
1574 };
1575
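/* Fill adev->gfx.config and adev->gfx.cu_info from the GC info table. */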
1576 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1577 {
1578 struct binary_header *bhdr;
1579 union gc_info *gc_info;
1580 u16 offset;
1581
1582 if (!adev->mman.discovery_bin) {
1583 DRM_ERROR("ip discovery uninitialized\n");
1584 return -EINVAL;
1585 }
1586
1587 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1588 offset = le16_to_cpu(bhdr->table_list[GC].offset);
1589
1590 if (!offset)
1591 return 0;
1592
1593 gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1594
1595 switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1596 case 1:
1597 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1598 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1599 le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1600 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1601 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1602 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1603 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1604 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1605 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1606 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1607 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1608 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1609 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1610 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1611 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1612 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1613 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1614 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1615 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1616 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1617 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1618 adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1619 }
1620 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1621 adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1622 adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1623 adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1624 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1625 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1626 adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1627 adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1628 adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1629 }
1630 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
1631 adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
1632 adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
1633 adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
1634 adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
1635 adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
1636 adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
1637 adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
1638 adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
1639 }
1640 break;
1641 case 2:
1642 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1643 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1644 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1645 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1646 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1647 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1648 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1649 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1650 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1651 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1652 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1653 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1654 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1655 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1656 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1657 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1658 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1659 if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1660 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1661 adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1662 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1663 adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1664 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1665 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1666 adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1667 }
1668 break;
1669 default:
1670 dev_err(adev->dev,
1671 "Unhandled GC info table %d.%d\n",
1672 le16_to_cpu(gc_info->v1.header.version_major),
1673 le16_to_cpu(gc_info->v1.header.version_minor));
1674 return -EINVAL;
1675 }
1676 return 0;
1677 }
1678
1679 union mall_info {
1680 struct mall_info_v1_0 v1;
1681 struct mall_info_v2_0 v2;
1682 };
1683
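/* Compute the total MALL size from the per-UMC sizes reported in the
 * MALL info table.
 */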
1684 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1685 {
1686 struct binary_header *bhdr;
1687 union mall_info *mall_info;
1688 u32 u, mall_size_per_umc, m_s_present, half_use;
1689 u64 mall_size;
1690 u16 offset;
1691
1692 if (!adev->mman.discovery_bin) {
1693 DRM_ERROR("ip discovery uninitialized\n");
1694 return -EINVAL;
1695 }
1696
1697 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1698 offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1699
1700 if (!offset)
1701 return 0;
1702
1703 mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1704
1705 switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1706 case 1:
1707 mall_size = 0;
1708 mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1709 m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1710 half_use = le32_to_cpu(mall_info->v1.m_half_use);
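/*
 * Worked example (illustrative values, not from any real table): with
 * mall_size_per_umc = 2 MiB and num_umc = 4, where UMC 0 has its
 * m_s_present bit set and UMC 1 its m_half_use bit set, the loop below
 * accumulates 2*2 + 2/2 + 2 + 2 = 9 MiB of MALL.
 */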
1711 for (u = 0; u < adev->gmc.num_umc; u++) {
1712 if (m_s_present & (1 << u))
1713 mall_size += mall_size_per_umc * 2;
1714 else if (half_use & (1 << u))
1715 mall_size += mall_size_per_umc / 2;
1716 else
1717 mall_size += mall_size_per_umc;
1718 }
1719 adev->gmc.mall_size = mall_size;
1720 adev->gmc.m_half_use = half_use;
1721 break;
1722 case 2:
1723 mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1724 adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
1725 break;
1726 default:
1727 dev_err(adev->dev,
1728 "Unhandled MALL info table %d.%d\n",
1729 le16_to_cpu(mall_info->v1.header.version_major),
1730 le16_to_cpu(mall_info->v1.header.version_minor));
1731 return -EINVAL;
1732 }
1733 return 0;
1734 }
1735
1736 union vcn_info {
1737 struct vcn_info_v1_0 v1;
1738 };
1739
1740 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1741 {
1742 struct binary_header *bhdr;
1743 union vcn_info *vcn_info;
1744 u16 offset;
1745 int v;
1746
1747 if (!adev->mman.discovery_bin) {
1748 DRM_ERROR("ip discovery uninitialized\n");
1749 return -EINVAL;
1750 }
1751
1752 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1753 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1754 * but that may change in the future with new GPUs so keep this
1755 * check for defensive purposes.
1756 */
1757 if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1758 dev_err(adev->dev, "invalid vcn instances\n");
1759 return -EINVAL;
1760 }
1761
1762 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1763 offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1764
1765 if (!offset)
1766 return 0;
1767
1768 vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1769
1770 switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1771 case 1:
1772 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1773 * so this won't overflow.
1774 */
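/*
 * Each set bit in fuse_data.all_bits is expected to mark a codec that
 * is fused off on this VCN instance; consumers check this mask before
 * advertising codec capabilities.
 */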
1775 for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1776 adev->vcn.inst[v].vcn_codec_disable_mask =
1777 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1778 }
1779 break;
1780 default:
1781 dev_err(adev->dev,
1782 "Unhandled VCN info table %d.%d\n",
1783 le16_to_cpu(vcn_info->v1.header.version_major),
1784 le16_to_cpu(vcn_info->v1.header.version_minor));
1785 return -EINVAL;
1786 }
1787 return 0;
1788 }
1789
1790 union nps_info {
1791 struct nps_info_v1_0 v1;
1792 };
1793
1794 static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
1795 union nps_info *nps_data)
1796 {
1797 uint64_t vram_size, pos, offset;
1798 struct nps_info_header *nhdr;
1799 struct binary_header bhdr;
1800 uint16_t checksum;
1801
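/*
 * A copy of the discovery binary lives DISCOVERY_TMR_OFFSET bytes below
 * the end of VRAM. RCC_CONFIG_MEMSIZE reports the VRAM size in MiB,
 * hence the << 20 to convert to bytes.
 */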
1802 vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
1803 pos = vram_size - DISCOVERY_TMR_OFFSET;
1804 amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);
1805
1806 offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
1807 checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
1808
1809 amdgpu_device_vram_access(adev, (pos + offset), nps_data,
1810 sizeof(*nps_data), false);
1811
1812 nhdr = (struct nps_info_header *)(nps_data);
1813 if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
1814 le32_to_cpu(nhdr->size_bytes),
1815 checksum)) {
1816 dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
1817 return -EINVAL;
1818 }
1819
1820 return 0;
1821 }
1822
1823 int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
1824 uint32_t *nps_type,
1825 struct amdgpu_gmc_memrange **ranges,
1826 int *range_cnt, bool refresh)
1827 {
1828 struct amdgpu_gmc_memrange *mem_ranges;
1829 struct binary_header *bhdr;
1830 union nps_info *nps_info;
1831 union nps_info nps_data;
1832 u16 offset;
1833 int i, r;
1834
1835 if (!nps_type || !range_cnt || !ranges)
1836 return -EINVAL;
1837
1838 if (refresh) {
1839 r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
1840 if (r)
1841 return r;
1842 nps_info = &nps_data;
1843 } else {
1844 if (!adev->mman.discovery_bin) {
1845 dev_err(adev->dev,
1846 "fetch mem range failed, ip discovery uninitialized\n");
1847 return -EINVAL;
1848 }
1849
1850 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1851 offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
1852
1853 if (!offset)
1854 return -ENOENT;
1855
1856 /* If verification fails, return as if NPS table doesn't exist */
1857 if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
1858 return -ENOENT;
1859
1860 nps_info =
1861 (union nps_info *)(adev->mman.discovery_bin + offset);
1862 }
1863
1864 switch (le16_to_cpu(nps_info->v1.header.version_major)) {
1865 case 1:
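/*
 * The range array is handed back through *ranges; the caller owns it
 * and is expected to release it with kvfree().
 */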
1866 mem_ranges = kvcalloc(nps_info->v1.count,
1867 sizeof(*mem_ranges),
1868 GFP_KERNEL);
1869 if (!mem_ranges)
1870 return -ENOMEM;
1871 *nps_type = nps_info->v1.nps_type;
1872 *range_cnt = nps_info->v1.count;
1873 for (i = 0; i < *range_cnt; i++) {
1874 mem_ranges[i].base_address =
1875 nps_info->v1.instance_info[i].base_address;
1876 mem_ranges[i].limit_address =
1877 nps_info->v1.instance_info[i].limit_address;
1878 mem_ranges[i].nid_mask = -1;
1879 mem_ranges[i].flags = 0;
1880 }
1881 *ranges = mem_ranges;
1882 break;
1883 default:
1884 dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
1885 le16_to_cpu(nps_info->v1.header.version_major),
1886 le16_to_cpu(nps_info->v1.header.version_minor));
1887 return -EINVAL;
1888 }
1889
1890 return 0;
1891 }
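/*
 * Hypothetical usage sketch (not taken from this file): a GMC-side
 * caller querying the NPS layout might do something like
 *
 *	struct amdgpu_gmc_memrange *ranges;
 *	uint32_t nps_type;
 *	int cnt;
 *
 *	if (!amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges,
 *					   &cnt, false)) {
 *		... consume ranges[0..cnt - 1] ...
 *		kvfree(ranges);
 *	}
 */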
1892
1893 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1894 {
1895 /* key off the GC IP version to pick the matching SoC-level common block */
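/*
 * Note: IP_VERSION(major, minor, rev) packs the discovered version
 * triple into a single integer (the exact bit layout is defined in
 * amdgpu.h), which is what lets these switches match on a full
 * major.minor.rev at once.
 */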
1896 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1897 case IP_VERSION(9, 0, 1):
1898 case IP_VERSION(9, 1, 0):
1899 case IP_VERSION(9, 2, 1):
1900 case IP_VERSION(9, 2, 2):
1901 case IP_VERSION(9, 3, 0):
1902 case IP_VERSION(9, 4, 0):
1903 case IP_VERSION(9, 4, 1):
1904 case IP_VERSION(9, 4, 2):
1905 case IP_VERSION(9, 4, 3):
1906 case IP_VERSION(9, 4, 4):
1907 case IP_VERSION(9, 5, 0):
1908 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1909 break;
1910 case IP_VERSION(10, 1, 10):
1911 case IP_VERSION(10, 1, 1):
1912 case IP_VERSION(10, 1, 2):
1913 case IP_VERSION(10, 1, 3):
1914 case IP_VERSION(10, 1, 4):
1915 case IP_VERSION(10, 3, 0):
1916 case IP_VERSION(10, 3, 1):
1917 case IP_VERSION(10, 3, 2):
1918 case IP_VERSION(10, 3, 3):
1919 case IP_VERSION(10, 3, 4):
1920 case IP_VERSION(10, 3, 5):
1921 case IP_VERSION(10, 3, 6):
1922 case IP_VERSION(10, 3, 7):
1923 amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1924 break;
1925 case IP_VERSION(11, 0, 0):
1926 case IP_VERSION(11, 0, 1):
1927 case IP_VERSION(11, 0, 2):
1928 case IP_VERSION(11, 0, 3):
1929 case IP_VERSION(11, 0, 4):
1930 case IP_VERSION(11, 5, 0):
1931 case IP_VERSION(11, 5, 1):
1932 case IP_VERSION(11, 5, 2):
1933 case IP_VERSION(11, 5, 3):
1934 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1935 break;
1936 case IP_VERSION(12, 0, 0):
1937 case IP_VERSION(12, 0, 1):
1938 amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
1939 break;
1940 default:
1941 dev_err(adev->dev,
1942 "Failed to add common ip block(GC_HWIP:0x%x)\n",
1943 amdgpu_ip_version(adev, GC_HWIP, 0));
1944 return -EINVAL;
1945 }
1946 return 0;
1947 }
1948
1949 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1950 {
1951 /* use GC or MMHUB IP version */
1952 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1953 case IP_VERSION(9, 0, 1):
1954 case IP_VERSION(9, 1, 0):
1955 case IP_VERSION(9, 2, 1):
1956 case IP_VERSION(9, 2, 2):
1957 case IP_VERSION(9, 3, 0):
1958 case IP_VERSION(9, 4, 0):
1959 case IP_VERSION(9, 4, 1):
1960 case IP_VERSION(9, 4, 2):
1961 case IP_VERSION(9, 4, 3):
1962 case IP_VERSION(9, 4, 4):
1963 case IP_VERSION(9, 5, 0):
1964 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1965 break;
1966 case IP_VERSION(10, 1, 10):
1967 case IP_VERSION(10, 1, 1):
1968 case IP_VERSION(10, 1, 2):
1969 case IP_VERSION(10, 1, 3):
1970 case IP_VERSION(10, 1, 4):
1971 case IP_VERSION(10, 3, 0):
1972 case IP_VERSION(10, 3, 1):
1973 case IP_VERSION(10, 3, 2):
1974 case IP_VERSION(10, 3, 3):
1975 case IP_VERSION(10, 3, 4):
1976 case IP_VERSION(10, 3, 5):
1977 case IP_VERSION(10, 3, 6):
1978 case IP_VERSION(10, 3, 7):
1979 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1980 break;
1981 case IP_VERSION(11, 0, 0):
1982 case IP_VERSION(11, 0, 1):
1983 case IP_VERSION(11, 0, 2):
1984 case IP_VERSION(11, 0, 3):
1985 case IP_VERSION(11, 0, 4):
1986 case IP_VERSION(11, 5, 0):
1987 case IP_VERSION(11, 5, 1):
1988 case IP_VERSION(11, 5, 2):
1989 case IP_VERSION(11, 5, 3):
1990 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1991 break;
1992 case IP_VERSION(12, 0, 0):
1993 case IP_VERSION(12, 0, 1):
1994 amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
1995 break;
1996 default:
1997 dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1998 amdgpu_ip_version(adev, GC_HWIP, 0));
1999 return -EINVAL;
2000 }
2001 return 0;
2002 }
2003
2004 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
2005 {
2006 switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
2007 case IP_VERSION(4, 0, 0):
2008 case IP_VERSION(4, 0, 1):
2009 case IP_VERSION(4, 1, 0):
2010 case IP_VERSION(4, 1, 1):
2011 case IP_VERSION(4, 3, 0):
2012 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
2013 break;
2014 case IP_VERSION(4, 2, 0):
2015 case IP_VERSION(4, 2, 1):
2016 case IP_VERSION(4, 4, 0):
2017 case IP_VERSION(4, 4, 2):
2018 case IP_VERSION(4, 4, 5):
2019 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
2020 break;
2021 case IP_VERSION(5, 0, 0):
2022 case IP_VERSION(5, 0, 1):
2023 case IP_VERSION(5, 0, 2):
2024 case IP_VERSION(5, 0, 3):
2025 case IP_VERSION(5, 2, 0):
2026 case IP_VERSION(5, 2, 1):
2027 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
2028 break;
2029 case IP_VERSION(6, 0, 0):
2030 case IP_VERSION(6, 0, 1):
2031 case IP_VERSION(6, 0, 2):
2032 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
2033 break;
2034 case IP_VERSION(6, 1, 0):
2035 amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
2036 break;
2037 case IP_VERSION(7, 0, 0):
2038 amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
2039 break;
2040 default:
2041 dev_err(adev->dev,
2042 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
2043 amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
2044 return -EINVAL;
2045 }
2046 return 0;
2047 }
2048
2049 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
2050 {
2051 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2052 case IP_VERSION(9, 0, 0):
2053 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
2054 break;
2055 case IP_VERSION(10, 0, 0):
2056 case IP_VERSION(10, 0, 1):
2057 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
2058 break;
2059 case IP_VERSION(11, 0, 0):
2060 case IP_VERSION(11, 0, 2):
2061 case IP_VERSION(11, 0, 4):
2062 case IP_VERSION(11, 0, 5):
2063 case IP_VERSION(11, 0, 9):
2064 case IP_VERSION(11, 0, 7):
2065 case IP_VERSION(11, 0, 11):
2066 case IP_VERSION(11, 0, 12):
2067 case IP_VERSION(11, 0, 13):
2068 case IP_VERSION(11, 5, 0):
2069 case IP_VERSION(11, 5, 2):
2070 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
2071 break;
2072 case IP_VERSION(11, 0, 8):
2073 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
2074 break;
2075 case IP_VERSION(11, 0, 3):
2076 case IP_VERSION(12, 0, 1):
2077 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
2078 break;
2079 case IP_VERSION(13, 0, 0):
2080 case IP_VERSION(13, 0, 1):
2081 case IP_VERSION(13, 0, 2):
2082 case IP_VERSION(13, 0, 3):
2083 case IP_VERSION(13, 0, 5):
2084 case IP_VERSION(13, 0, 6):
2085 case IP_VERSION(13, 0, 7):
2086 case IP_VERSION(13, 0, 8):
2087 case IP_VERSION(13, 0, 10):
2088 case IP_VERSION(13, 0, 11):
2089 case IP_VERSION(13, 0, 12):
2090 case IP_VERSION(13, 0, 14):
2091 case IP_VERSION(14, 0, 0):
2092 case IP_VERSION(14, 0, 1):
2093 case IP_VERSION(14, 0, 4):
2094 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
2095 break;
2096 case IP_VERSION(13, 0, 4):
2097 amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
2098 break;
2099 case IP_VERSION(14, 0, 2):
2100 case IP_VERSION(14, 0, 3):
2101 case IP_VERSION(14, 0, 5):
2102 amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
2103 break;
2104 default:
2105 dev_err(adev->dev,
2106 "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
2107 amdgpu_ip_version(adev, MP0_HWIP, 0));
2108 return -EINVAL;
2109 }
2110 return 0;
2111 }
2112
2113 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
2114 {
2115 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2116 case IP_VERSION(9, 0, 0):
2117 case IP_VERSION(10, 0, 0):
2118 case IP_VERSION(10, 0, 1):
2119 case IP_VERSION(11, 0, 2):
2120 if (adev->asic_type == CHIP_ARCTURUS)
2121 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2122 else
2123 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2124 break;
2125 case IP_VERSION(11, 0, 0):
2126 case IP_VERSION(11, 0, 5):
2127 case IP_VERSION(11, 0, 9):
2128 case IP_VERSION(11, 0, 7):
2129 case IP_VERSION(11, 0, 11):
2130 case IP_VERSION(11, 0, 12):
2131 case IP_VERSION(11, 0, 13):
2132 case IP_VERSION(11, 5, 0):
2133 case IP_VERSION(11, 5, 2):
2134 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2135 break;
2136 case IP_VERSION(11, 0, 8):
2137 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
2138 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2139 break;
2140 case IP_VERSION(12, 0, 0):
2141 case IP_VERSION(12, 0, 1):
2142 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
2143 break;
2144 case IP_VERSION(13, 0, 0):
2145 case IP_VERSION(13, 0, 1):
2146 case IP_VERSION(13, 0, 2):
2147 case IP_VERSION(13, 0, 3):
2148 case IP_VERSION(13, 0, 4):
2149 case IP_VERSION(13, 0, 5):
2150 case IP_VERSION(13, 0, 6):
2151 case IP_VERSION(13, 0, 7):
2152 case IP_VERSION(13, 0, 8):
2153 case IP_VERSION(13, 0, 10):
2154 case IP_VERSION(13, 0, 11):
2155 case IP_VERSION(13, 0, 14):
2156 case IP_VERSION(13, 0, 12):
2157 amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
2158 break;
2159 case IP_VERSION(14, 0, 0):
2160 case IP_VERSION(14, 0, 1):
2161 case IP_VERSION(14, 0, 2):
2162 case IP_VERSION(14, 0, 3):
2163 case IP_VERSION(14, 0, 4):
2164 case IP_VERSION(14, 0, 5):
2165 amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
2166 break;
2167 default:
2168 dev_err(adev->dev,
2169 "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
2170 amdgpu_ip_version(adev, MP1_HWIP, 0));
2171 return -EINVAL;
2172 }
2173 return 0;
2174 }
2175
2176 #if defined(CONFIG_DRM_AMD_DC)
2177 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
2178 {
2179 amdgpu_device_set_sriov_virtual_display(adev);
2180 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2181 }
2182 #endif
2183
2184 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
2185 {
2186 if (adev->enable_virtual_display) {
2187 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2188 return 0;
2189 }
2190
2191 if (!amdgpu_device_has_dc_support(adev))
2192 return 0;
2193
2194 #if defined(CONFIG_DRM_AMD_DC)
2195 if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2196 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2197 case IP_VERSION(1, 0, 0):
2198 case IP_VERSION(1, 0, 1):
2199 case IP_VERSION(2, 0, 2):
2200 case IP_VERSION(2, 0, 0):
2201 case IP_VERSION(2, 0, 3):
2202 case IP_VERSION(2, 1, 0):
2203 case IP_VERSION(3, 0, 0):
2204 case IP_VERSION(3, 0, 2):
2205 case IP_VERSION(3, 0, 3):
2206 case IP_VERSION(3, 0, 1):
2207 case IP_VERSION(3, 1, 2):
2208 case IP_VERSION(3, 1, 3):
2209 case IP_VERSION(3, 1, 4):
2210 case IP_VERSION(3, 1, 5):
2211 case IP_VERSION(3, 1, 6):
2212 case IP_VERSION(3, 2, 0):
2213 case IP_VERSION(3, 2, 1):
2214 case IP_VERSION(3, 5, 0):
2215 case IP_VERSION(3, 5, 1):
2216 case IP_VERSION(3, 6, 0):
2217 case IP_VERSION(4, 1, 0):
2218 /* TODO: Fix IP version. DC code expects version 4.0.1 */
2219 if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
2220 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);
2221
2222 if (amdgpu_sriov_vf(adev))
2223 amdgpu_discovery_set_sriov_display(adev);
2224 else
2225 amdgpu_device_ip_block_add(adev, &dm_ip_block);
2226 break;
2227 default:
2228 dev_err(adev->dev,
2229 "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
2230 amdgpu_ip_version(adev, DCE_HWIP, 0));
2231 return -EINVAL;
2232 }
2233 } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2234 switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2235 case IP_VERSION(12, 0, 0):
2236 case IP_VERSION(12, 0, 1):
2237 case IP_VERSION(12, 1, 0):
2238 if (amdgpu_sriov_vf(adev))
2239 amdgpu_discovery_set_sriov_display(adev);
2240 else
2241 amdgpu_device_ip_block_add(adev, &dm_ip_block);
2242 break;
2243 default:
2244 dev_err(adev->dev,
2245 "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
2246 amdgpu_ip_version(adev, DCI_HWIP, 0));
2247 return -EINVAL;
2248 }
2249 }
2250 #endif
2251 return 0;
2252 }
2253
2254 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
2255 {
2256 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2257 case IP_VERSION(9, 0, 1):
2258 case IP_VERSION(9, 1, 0):
2259 case IP_VERSION(9, 2, 1):
2260 case IP_VERSION(9, 2, 2):
2261 case IP_VERSION(9, 3, 0):
2262 case IP_VERSION(9, 4, 0):
2263 case IP_VERSION(9, 4, 1):
2264 case IP_VERSION(9, 4, 2):
2265 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
2266 break;
2267 case IP_VERSION(9, 4, 3):
2268 case IP_VERSION(9, 4, 4):
2269 case IP_VERSION(9, 5, 0):
2270 amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
2271 break;
2272 case IP_VERSION(10, 1, 10):
2273 case IP_VERSION(10, 1, 2):
2274 case IP_VERSION(10, 1, 1):
2275 case IP_VERSION(10, 1, 3):
2276 case IP_VERSION(10, 1, 4):
2277 case IP_VERSION(10, 3, 0):
2278 case IP_VERSION(10, 3, 2):
2279 case IP_VERSION(10, 3, 1):
2280 case IP_VERSION(10, 3, 4):
2281 case IP_VERSION(10, 3, 5):
2282 case IP_VERSION(10, 3, 6):
2283 case IP_VERSION(10, 3, 3):
2284 case IP_VERSION(10, 3, 7):
2285 amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
2286 break;
2287 case IP_VERSION(11, 0, 0):
2288 case IP_VERSION(11, 0, 1):
2289 case IP_VERSION(11, 0, 2):
2290 case IP_VERSION(11, 0, 3):
2291 case IP_VERSION(11, 0, 4):
2292 case IP_VERSION(11, 5, 0):
2293 case IP_VERSION(11, 5, 1):
2294 case IP_VERSION(11, 5, 2):
2295 case IP_VERSION(11, 5, 3):
2296 amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
2297 break;
2298 case IP_VERSION(12, 0, 0):
2299 case IP_VERSION(12, 0, 1):
2300 amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
2301 break;
2302 default:
2303 dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
2304 amdgpu_ip_version(adev, GC_HWIP, 0));
2305 return -EINVAL;
2306 }
2307 return 0;
2308 }
2309
2310 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
2311 {
2312 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
2313 case IP_VERSION(4, 0, 0):
2314 case IP_VERSION(4, 0, 1):
2315 case IP_VERSION(4, 1, 0):
2316 case IP_VERSION(4, 1, 1):
2317 case IP_VERSION(4, 1, 2):
2318 case IP_VERSION(4, 2, 0):
2319 case IP_VERSION(4, 2, 2):
2320 case IP_VERSION(4, 4, 0):
2321 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
2322 break;
2323 case IP_VERSION(4, 4, 2):
2324 case IP_VERSION(4, 4, 5):
2325 case IP_VERSION(4, 4, 4):
2326 amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
2327 break;
2328 case IP_VERSION(5, 0, 0):
2329 case IP_VERSION(5, 0, 1):
2330 case IP_VERSION(5, 0, 2):
2331 case IP_VERSION(5, 0, 5):
2332 amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2333 break;
2334 case IP_VERSION(5, 2, 0):
2335 case IP_VERSION(5, 2, 2):
2336 case IP_VERSION(5, 2, 4):
2337 case IP_VERSION(5, 2, 5):
2338 case IP_VERSION(5, 2, 6):
2339 case IP_VERSION(5, 2, 3):
2340 case IP_VERSION(5, 2, 1):
2341 case IP_VERSION(5, 2, 7):
2342 amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2343 break;
2344 case IP_VERSION(6, 0, 0):
2345 case IP_VERSION(6, 0, 1):
2346 case IP_VERSION(6, 0, 2):
2347 case IP_VERSION(6, 0, 3):
2348 case IP_VERSION(6, 1, 0):
2349 case IP_VERSION(6, 1, 1):
2350 case IP_VERSION(6, 1, 2):
2351 case IP_VERSION(6, 1, 3):
2352 amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2353 break;
2354 case IP_VERSION(7, 0, 0):
2355 case IP_VERSION(7, 0, 1):
2356 amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
2357 break;
2358 default:
2359 dev_err(adev->dev,
2360 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2361 amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2362 return -EINVAL;
2363 }
2364 return 0;
2365 }
2366
2367 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2368 {
2369 if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2370 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2371 case IP_VERSION(7, 0, 0):
2372 case IP_VERSION(7, 2, 0):
2373 /* UVD is not supported on vega20 SR-IOV */
2374 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2375 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2376 break;
2377 default:
2378 dev_err(adev->dev,
2379 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2380 amdgpu_ip_version(adev, UVD_HWIP, 0));
2381 return -EINVAL;
2382 }
2383 switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2384 case IP_VERSION(4, 0, 0):
2385 case IP_VERSION(4, 1, 0):
2386 /* VCE is not supported on vega20 SR-IOV */
2387 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2388 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2389 break;
2390 default:
2391 dev_err(adev->dev,
2392 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2393 amdgpu_ip_version(adev, VCE_HWIP, 0));
2394 return -EINVAL;
2395 }
2396 } else {
2397 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2398 case IP_VERSION(1, 0, 0):
2399 case IP_VERSION(1, 0, 1):
2400 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2401 break;
2402 case IP_VERSION(2, 0, 0):
2403 case IP_VERSION(2, 0, 2):
2404 case IP_VERSION(2, 2, 0):
2405 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2406 if (!amdgpu_sriov_vf(adev))
2407 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2408 break;
2409 case IP_VERSION(2, 0, 3):
2410 break;
2411 case IP_VERSION(2, 5, 0):
2412 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2413 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2414 break;
2415 case IP_VERSION(2, 6, 0):
2416 amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2417 amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2418 break;
2419 case IP_VERSION(3, 0, 0):
2420 case IP_VERSION(3, 0, 16):
2421 case IP_VERSION(3, 1, 1):
2422 case IP_VERSION(3, 1, 2):
2423 case IP_VERSION(3, 0, 2):
2424 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2425 if (!amdgpu_sriov_vf(adev))
2426 amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2427 break;
2428 case IP_VERSION(3, 0, 33):
2429 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2430 break;
2431 case IP_VERSION(4, 0, 0):
2432 case IP_VERSION(4, 0, 2):
2433 case IP_VERSION(4, 0, 4):
2434 amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2435 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2436 break;
2437 case IP_VERSION(4, 0, 3):
2438 amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2439 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2440 break;
2441 case IP_VERSION(4, 0, 5):
2442 case IP_VERSION(4, 0, 6):
2443 amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2444 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2445 break;
2446 case IP_VERSION(5, 0, 0):
2447 amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2448 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
2449 break;
2450 case IP_VERSION(5, 0, 1):
2451 amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
2452 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
2453 break;
2454 default:
2455 dev_err(adev->dev,
2456 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2457 amdgpu_ip_version(adev, UVD_HWIP, 0));
2458 return -EINVAL;
2459 }
2460 }
2461 return 0;
2462 }
2463
2464 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2465 {
2466 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2467 case IP_VERSION(11, 0, 0):
2468 case IP_VERSION(11, 0, 1):
2469 case IP_VERSION(11, 0, 2):
2470 case IP_VERSION(11, 0, 3):
2471 case IP_VERSION(11, 0, 4):
2472 case IP_VERSION(11, 5, 0):
2473 case IP_VERSION(11, 5, 1):
2474 case IP_VERSION(11, 5, 2):
2475 case IP_VERSION(11, 5, 3):
2476 amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2477 adev->enable_mes = true;
2478 adev->enable_mes_kiq = true;
2479 break;
2480 case IP_VERSION(12, 0, 0):
2481 case IP_VERSION(12, 0, 1):
2482 amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
2483 adev->enable_mes = true;
2484 adev->enable_mes_kiq = true;
2485 if (amdgpu_uni_mes)
2486 adev->enable_uni_mes = true;
2487 break;
2488 default:
2489 break;
2490 }
2491 return 0;
2492 }
2493
2494 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2495 {
2496 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2497 case IP_VERSION(9, 4, 3):
2498 case IP_VERSION(9, 4, 4):
2499 case IP_VERSION(9, 5, 0):
2500 aqua_vanjaram_init_soc_config(adev);
2501 break;
2502 default:
2503 break;
2504 }
2505 }
2506
2507 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2508 {
2509 switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2510 case IP_VERSION(6, 1, 0):
2511 case IP_VERSION(6, 1, 1):
2512 case IP_VERSION(6, 1, 3):
2513 amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2514 break;
2515 default:
2516 break;
2517 }
2518
2519 return 0;
2520 }
2521
2522 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2523 {
2524 switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2525 case IP_VERSION(4, 0, 5):
2526 case IP_VERSION(4, 0, 6):
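/*
 * Bit 0 of the amdgpu_umsch_mm module parameter opts in to the
 * user-mode scheduler for multimedia (UMSCH) on these VCN versions.
 */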
2527 if (amdgpu_umsch_mm & 0x1) {
2528 amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2529 adev->enable_umsch_mm = true;
2530 }
2531 break;
2532 default:
2533 break;
2534 }
2535
2536 return 0;
2537 }
2538
2539 static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
2540 {
2541 #if defined(CONFIG_DRM_AMD_ISP)
2542 switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
2543 case IP_VERSION(4, 1, 0):
2544 amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
2545 break;
2546 case IP_VERSION(4, 1, 1):
2547 amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
2548 break;
2549 default:
2550 break;
2551 }
2552 #endif
2553
2554 return 0;
2555 }
2556
2557 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2558 {
2559 int r;
2560
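/*
 * The ASICs below either lack a usable IP discovery table or only ship
 * one for informational purposes, so their IP versions and instance
 * counts are hard-coded here; everything newer takes the default path
 * and derives them from the table read out of VRAM.
 */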
2561 switch (adev->asic_type) {
2562 case CHIP_VEGA10:
2563 /* This is not fatal. We only need the discovery
2564 * binary for sysfs. We don't need it for a
2565 * functional system.
2566 */
2567 amdgpu_discovery_init(adev);
2568 vega10_reg_base_init(adev);
2569 adev->sdma.num_instances = 2;
2570 adev->sdma.sdma_mask = 3;
2571 adev->gmc.num_umc = 4;
2572 adev->gfx.xcc_mask = 1;
2573 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2574 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2575 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2576 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2577 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2578 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2579 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2580 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2581 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2582 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2583 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2584 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2585 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2586 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2587 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2588 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2589 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2590 break;
2591 case CHIP_VEGA12:
2592 /* This is not fatal. We only need the discovery
2593 * binary for sysfs. We don't need it for a
2594 * functional system.
2595 */
2596 amdgpu_discovery_init(adev);
2597 vega10_reg_base_init(adev);
2598 adev->sdma.num_instances = 2;
2599 adev->sdma.sdma_mask = 3;
2600 adev->gmc.num_umc = 4;
2601 adev->gfx.xcc_mask = 1;
2602 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2603 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2604 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2605 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2606 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2607 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2608 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2609 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2610 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2611 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2612 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2613 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2614 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2615 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2616 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2617 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2618 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2619 break;
2620 case CHIP_RAVEN:
2621 /* This is not fatal. We only need the discovery
2622 * binary for sysfs. We don't need it for a
2623 * functional system.
2624 */
2625 amdgpu_discovery_init(adev);
2626 vega10_reg_base_init(adev);
2627 adev->sdma.num_instances = 1;
2628 adev->sdma.sdma_mask = 1;
2629 adev->vcn.num_vcn_inst = 1;
2630 adev->gmc.num_umc = 2;
2631 adev->gfx.xcc_mask = 1;
2632 if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2633 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2634 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2635 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2636 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2637 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2638 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2639 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2640 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2641 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2642 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2643 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2644 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2645 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2646 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2647 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2648 adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
2649 } else {
2650 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2651 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2652 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2653 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2654 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2655 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2656 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2657 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2658 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2659 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2660 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2661 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2662 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2663 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2664 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2665 adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
2666 }
2667 break;
2668 case CHIP_VEGA20:
2669 /* This is not fatal. We only need the discovery
2670 * binary for sysfs. We don't need it for a
2671 * functional system.
2672 */
2673 amdgpu_discovery_init(adev);
2674 vega20_reg_base_init(adev);
2675 adev->sdma.num_instances = 2;
2676 adev->sdma.sdma_mask = 3;
2677 adev->gmc.num_umc = 8;
2678 adev->gfx.xcc_mask = 1;
2679 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2680 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2681 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2682 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2683 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2684 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2685 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2686 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2687 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2688 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2689 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2690 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2691 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2692 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2693 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2694 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2695 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2696 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2697 break;
2698 case CHIP_ARCTURUS:
2699 /* This is not fatal. We only need the discovery
2700 * binary for sysfs. We don't need it for a
2701 * functional system.
2702 */
2703 amdgpu_discovery_init(adev);
2704 arct_reg_base_init(adev);
2705 adev->sdma.num_instances = 8;
2706 adev->sdma.sdma_mask = 0xff;
2707 adev->vcn.num_vcn_inst = 2;
2708 adev->gmc.num_umc = 8;
2709 adev->gfx.xcc_mask = 1;
2710 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2711 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2712 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2713 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2714 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2715 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2716 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2717 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2718 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2719 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2720 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2721 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2722 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2723 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2724 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2725 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2726 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2727 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2728 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2729 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2730 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2731 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2732 break;
2733 case CHIP_ALDEBARAN:
2734 /* This is not fatal. We only need the discovery
2735 * binary for sysfs. We don't need it for a
2736 * functional system.
2737 */
2738 amdgpu_discovery_init(adev);
2739 aldebaran_reg_base_init(adev);
2740 adev->sdma.num_instances = 5;
2741 adev->sdma.sdma_mask = 0x1f;
2742 adev->vcn.num_vcn_inst = 2;
2743 adev->gmc.num_umc = 4;
2744 adev->gfx.xcc_mask = 1;
2745 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2746 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2747 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2748 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2749 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2750 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2751 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2752 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2753 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2754 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2755 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2756 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2757 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2758 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2759 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2760 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2761 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2762 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2763 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2764 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2765 break;
2766 case CHIP_CYAN_SKILLFISH:
2767 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
2768 r = amdgpu_discovery_reg_base_init(adev);
2769 if (r)
2770 return -EINVAL;
2771
2772 amdgpu_discovery_harvest_ip(adev);
2773 amdgpu_discovery_get_gfx_info(adev);
2774 amdgpu_discovery_get_mall_info(adev);
2775 amdgpu_discovery_get_vcn_info(adev);
2776 } else {
2777 cyan_skillfish_reg_base_init(adev);
2778 adev->sdma.num_instances = 2;
2779 adev->sdma.sdma_mask = 3;
2780 adev->gfx.xcc_mask = 1;
2781 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
2782 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
2783 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
2784 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
2785 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
2786 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
2787 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
2788 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
2789 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
2790 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
2791 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
2792 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
2793 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
2794 adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
2795 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
2796 }
2797 break;
2798 default:
2799 r = amdgpu_discovery_reg_base_init(adev);
2800 if (r) {
2801 drm_err(&adev->ddev, "discovery failed: %d\n", r);
2802 return r;
2803 }
2804
2805 amdgpu_discovery_harvest_ip(adev);
2806 amdgpu_discovery_get_gfx_info(adev);
2807 amdgpu_discovery_get_mall_info(adev);
2808 amdgpu_discovery_get_vcn_info(adev);
2809 break;
2810 }
2811
2812 amdgpu_discovery_init_soc_config(adev);
2813 amdgpu_discovery_sysfs_init(adev);
2814
2815 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2816 case IP_VERSION(9, 0, 1):
2817 case IP_VERSION(9, 2, 1):
2818 case IP_VERSION(9, 4, 0):
2819 case IP_VERSION(9, 4, 1):
2820 case IP_VERSION(9, 4, 2):
2821 case IP_VERSION(9, 4, 3):
2822 case IP_VERSION(9, 4, 4):
2823 case IP_VERSION(9, 5, 0):
2824 adev->family = AMDGPU_FAMILY_AI;
2825 break;
2826 case IP_VERSION(9, 1, 0):
2827 case IP_VERSION(9, 2, 2):
2828 case IP_VERSION(9, 3, 0):
2829 adev->family = AMDGPU_FAMILY_RV;
2830 break;
2831 case IP_VERSION(10, 1, 10):
2832 case IP_VERSION(10, 1, 1):
2833 case IP_VERSION(10, 1, 2):
2834 case IP_VERSION(10, 1, 3):
2835 case IP_VERSION(10, 1, 4):
2836 case IP_VERSION(10, 3, 0):
2837 case IP_VERSION(10, 3, 2):
2838 case IP_VERSION(10, 3, 4):
2839 case IP_VERSION(10, 3, 5):
2840 adev->family = AMDGPU_FAMILY_NV;
2841 break;
2842 case IP_VERSION(10, 3, 1):
2843 adev->family = AMDGPU_FAMILY_VGH;
2844 adev->apu_flags |= AMD_APU_IS_VANGOGH;
2845 break;
2846 case IP_VERSION(10, 3, 3):
2847 adev->family = AMDGPU_FAMILY_YC;
2848 break;
2849 case IP_VERSION(10, 3, 6):
2850 adev->family = AMDGPU_FAMILY_GC_10_3_6;
2851 break;
2852 case IP_VERSION(10, 3, 7):
2853 adev->family = AMDGPU_FAMILY_GC_10_3_7;
2854 break;
2855 case IP_VERSION(11, 0, 0):
2856 case IP_VERSION(11, 0, 2):
2857 case IP_VERSION(11, 0, 3):
2858 adev->family = AMDGPU_FAMILY_GC_11_0_0;
2859 break;
2860 case IP_VERSION(11, 0, 1):
2861 case IP_VERSION(11, 0, 4):
2862 adev->family = AMDGPU_FAMILY_GC_11_0_1;
2863 break;
2864 case IP_VERSION(11, 5, 0):
2865 case IP_VERSION(11, 5, 1):
2866 case IP_VERSION(11, 5, 2):
2867 case IP_VERSION(11, 5, 3):
2868 adev->family = AMDGPU_FAMILY_GC_11_5_0;
2869 break;
2870 case IP_VERSION(12, 0, 0):
2871 case IP_VERSION(12, 0, 1):
2872 adev->family = AMDGPU_FAMILY_GC_12_0_0;
2873 break;
2874 default:
2875 return -EINVAL;
2876 }
2877
2878 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2879 case IP_VERSION(9, 1, 0):
2880 case IP_VERSION(9, 2, 2):
2881 case IP_VERSION(9, 3, 0):
2882 case IP_VERSION(10, 1, 3):
2883 case IP_VERSION(10, 1, 4):
2884 case IP_VERSION(10, 3, 1):
2885 case IP_VERSION(10, 3, 3):
2886 case IP_VERSION(10, 3, 6):
2887 case IP_VERSION(10, 3, 7):
2888 case IP_VERSION(11, 0, 1):
2889 case IP_VERSION(11, 0, 4):
2890 case IP_VERSION(11, 5, 0):
2891 case IP_VERSION(11, 5, 1):
2892 case IP_VERSION(11, 5, 2):
2893 case IP_VERSION(11, 5, 3):
2894 adev->flags |= AMD_IS_APU;
2895 break;
2896 default:
2897 break;
2898 }
2899
2900 /* set NBIO version */
2901 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2902 case IP_VERSION(6, 1, 0):
2903 case IP_VERSION(6, 2, 0):
2904 adev->nbio.funcs = &nbio_v6_1_funcs;
2905 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2906 break;
2907 case IP_VERSION(7, 0, 0):
2908 case IP_VERSION(7, 0, 1):
2909 case IP_VERSION(2, 5, 0):
2910 adev->nbio.funcs = &nbio_v7_0_funcs;
2911 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2912 break;
2913 case IP_VERSION(7, 4, 0):
2914 case IP_VERSION(7, 4, 1):
2915 case IP_VERSION(7, 4, 4):
2916 adev->nbio.funcs = &nbio_v7_4_funcs;
2917 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2918 break;
2919 case IP_VERSION(7, 9, 0):
2920 case IP_VERSION(7, 9, 1):
2921 adev->nbio.funcs = &nbio_v7_9_funcs;
2922 adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2923 break;
2924 case IP_VERSION(7, 11, 0):
2925 case IP_VERSION(7, 11, 1):
2926 case IP_VERSION(7, 11, 2):
2927 case IP_VERSION(7, 11, 3):
2928 adev->nbio.funcs = &nbio_v7_11_funcs;
2929 adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2930 break;
2931 case IP_VERSION(7, 2, 0):
2932 case IP_VERSION(7, 2, 1):
2933 case IP_VERSION(7, 3, 0):
2934 case IP_VERSION(7, 5, 0):
2935 case IP_VERSION(7, 5, 1):
2936 adev->nbio.funcs = &nbio_v7_2_funcs;
2937 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2938 break;
2939 case IP_VERSION(2, 1, 1):
2940 case IP_VERSION(2, 3, 0):
2941 case IP_VERSION(2, 3, 1):
2942 case IP_VERSION(2, 3, 2):
2943 case IP_VERSION(3, 3, 0):
2944 case IP_VERSION(3, 3, 1):
2945 case IP_VERSION(3, 3, 2):
2946 case IP_VERSION(3, 3, 3):
2947 adev->nbio.funcs = &nbio_v2_3_funcs;
2948 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2949 break;
2950 case IP_VERSION(4, 3, 0):
2951 case IP_VERSION(4, 3, 1):
2952 if (amdgpu_sriov_vf(adev))
2953 adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2954 else
2955 adev->nbio.funcs = &nbio_v4_3_funcs;
2956 adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2957 break;
2958 case IP_VERSION(7, 7, 0):
2959 case IP_VERSION(7, 7, 1):
2960 adev->nbio.funcs = &nbio_v7_7_funcs;
2961 adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2962 break;
2963 case IP_VERSION(6, 3, 1):
2964 adev->nbio.funcs = &nbif_v6_3_1_funcs;
2965 adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
2966 break;
2967 default:
2968 break;
2969 }
2970
2971 switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2972 case IP_VERSION(4, 0, 0):
2973 case IP_VERSION(4, 0, 1):
2974 case IP_VERSION(4, 1, 0):
2975 case IP_VERSION(4, 1, 1):
2976 case IP_VERSION(4, 1, 2):
2977 case IP_VERSION(4, 2, 0):
2978 case IP_VERSION(4, 2, 1):
2979 case IP_VERSION(4, 4, 0):
2980 case IP_VERSION(4, 4, 2):
2981 case IP_VERSION(4, 4, 5):
2982 adev->hdp.funcs = &hdp_v4_0_funcs;
2983 break;
2984 case IP_VERSION(5, 0, 0):
2985 case IP_VERSION(5, 0, 1):
2986 case IP_VERSION(5, 0, 2):
2987 case IP_VERSION(5, 0, 3):
2988 case IP_VERSION(5, 0, 4):
2989 case IP_VERSION(5, 2, 0):
2990 adev->hdp.funcs = &hdp_v5_0_funcs;
2991 break;
2992 case IP_VERSION(5, 2, 1):
2993 adev->hdp.funcs = &hdp_v5_2_funcs;
2994 break;
2995 case IP_VERSION(6, 0, 0):
2996 case IP_VERSION(6, 0, 1):
2997 case IP_VERSION(6, 1, 0):
2998 adev->hdp.funcs = &hdp_v6_0_funcs;
2999 break;
3000 case IP_VERSION(7, 0, 0):
3001 adev->hdp.funcs = &hdp_v7_0_funcs;
3002 break;
3003 default:
3004 break;
3005 }
3006
3007 switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
3008 case IP_VERSION(3, 6, 0):
3009 case IP_VERSION(3, 6, 1):
3010 case IP_VERSION(3, 6, 2):
3011 adev->df.funcs = &df_v3_6_funcs;
3012 break;
3013 case IP_VERSION(2, 1, 0):
3014 case IP_VERSION(2, 1, 1):
3015 case IP_VERSION(2, 5, 0):
3016 case IP_VERSION(3, 5, 1):
3017 case IP_VERSION(3, 5, 2):
3018 adev->df.funcs = &df_v1_7_funcs;
3019 break;
3020 case IP_VERSION(4, 3, 0):
3021 adev->df.funcs = &df_v4_3_funcs;
3022 break;
3023 case IP_VERSION(4, 6, 2):
3024 adev->df.funcs = &df_v4_6_2_funcs;
3025 break;
3026 case IP_VERSION(4, 15, 0):
3027 case IP_VERSION(4, 15, 1):
3028 adev->df.funcs = &df_v4_15_funcs;
3029 break;
3030 default:
3031 break;
3032 }
3033
3034 switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
3035 case IP_VERSION(9, 0, 0):
3036 case IP_VERSION(9, 0, 1):
3037 case IP_VERSION(10, 0, 0):
3038 case IP_VERSION(10, 0, 1):
3039 case IP_VERSION(10, 0, 2):
3040 adev->smuio.funcs = &smuio_v9_0_funcs;
3041 break;
3042 case IP_VERSION(11, 0, 0):
3043 case IP_VERSION(11, 0, 2):
3044 case IP_VERSION(11, 0, 3):
3045 case IP_VERSION(11, 0, 4):
3046 case IP_VERSION(11, 0, 7):
3047 case IP_VERSION(11, 0, 8):
3048 adev->smuio.funcs = &smuio_v11_0_funcs;
3049 break;
3050 case IP_VERSION(11, 0, 6):
3051 case IP_VERSION(11, 0, 10):
3052 case IP_VERSION(11, 0, 11):
3053 case IP_VERSION(11, 5, 0):
3054 case IP_VERSION(11, 5, 2):
3055 case IP_VERSION(13, 0, 1):
3056 case IP_VERSION(13, 0, 9):
3057 case IP_VERSION(13, 0, 10):
3058 adev->smuio.funcs = &smuio_v11_0_6_funcs;
3059 break;
3060 case IP_VERSION(13, 0, 2):
3061 adev->smuio.funcs = &smuio_v13_0_funcs;
3062 break;
3063 case IP_VERSION(13, 0, 3):
3064 case IP_VERSION(13, 0, 11):
3065 adev->smuio.funcs = &smuio_v13_0_3_funcs;
3066 if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
3067 adev->flags |= AMD_IS_APU;
3068 }
3069 break;
3070 case IP_VERSION(13, 0, 6):
3071 case IP_VERSION(13, 0, 8):
3072 case IP_VERSION(14, 0, 0):
3073 case IP_VERSION(14, 0, 1):
3074 adev->smuio.funcs = &smuio_v13_0_6_funcs;
3075 break;
3076 case IP_VERSION(14, 0, 2):
3077 adev->smuio.funcs = &smuio_v14_0_2_funcs;
3078 break;
3079 default:
3080 break;
3081 }
3082
3083 switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
3084 case IP_VERSION(6, 0, 0):
3085 case IP_VERSION(6, 0, 1):
3086 case IP_VERSION(6, 0, 2):
3087 case IP_VERSION(6, 0, 3):
3088 adev->lsdma.funcs = &lsdma_v6_0_funcs;
3089 break;
3090 case IP_VERSION(7, 0, 0):
3091 case IP_VERSION(7, 0, 1):
3092 adev->lsdma.funcs = &lsdma_v7_0_funcs;
3093 break;
3094 default:
3095 break;
3096 }
3097
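/*
 * From here on, IP blocks are registered in their hardware init order:
 * SoC common and GMC first, since the blocks added after them depend on
 * both being brought up.
 */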
3098 r = amdgpu_discovery_set_common_ip_blocks(adev);
3099 if (r)
3100 return r;
3101
3102 r = amdgpu_discovery_set_gmc_ip_blocks(adev);
3103 if (r)
3104 return r;
3105
3106 /* For SR-IOV, PSP needs to be initialized before IH */
3107 if (amdgpu_sriov_vf(adev)) {
3108 r = amdgpu_discovery_set_psp_ip_blocks(adev);
3109 if (r)
3110 return r;
3111 r = amdgpu_discovery_set_ih_ip_blocks(adev);
3112 if (r)
3113 return r;
3114 } else {
3115 r = amdgpu_discovery_set_ih_ip_blocks(adev);
3116 if (r)
3117 return r;
3118
3119 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
3120 r = amdgpu_discovery_set_psp_ip_blocks(adev);
3121 if (r)
3122 return r;
3123 }
3124 }
3125
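/*
 * SMU is registered here only for PSP-backed firmware loading; the
 * direct and RLC-backdoor load cases are handled after GC/SDMA below.
 */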
3126 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
3127 r = amdgpu_discovery_set_smu_ip_blocks(adev);
3128 if (r)
3129 return r;
3130 }
3131
3132 r = amdgpu_discovery_set_display_ip_blocks(adev);
3133 if (r)
3134 return r;
3135
3136 r = amdgpu_discovery_set_gc_ip_blocks(adev);
3137 if (r)
3138 return r;
3139
3140 r = amdgpu_discovery_set_sdma_ip_blocks(adev);
3141 if (r)
3142 return r;
3143
3144 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
3145 !amdgpu_sriov_vf(adev)) ||
3146 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
3147 r = amdgpu_discovery_set_smu_ip_blocks(adev);
3148 if (r)
3149 return r;
3150 }
3151
3152 r = amdgpu_discovery_set_mm_ip_blocks(adev);
3153 if (r)
3154 return r;
3155
3156 r = amdgpu_discovery_set_mes_ip_blocks(adev);
3157 if (r)
3158 return r;
3159
3160 r = amdgpu_discovery_set_vpe_ip_blocks(adev);
3161 if (r)
3162 return r;
3163
3164 r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
3165 if (r)
3166 return r;
3167
3168 r = amdgpu_discovery_set_isp_ip_blocks(adev);
3169 if (r)
3170 return r;
3171 return 0;
3172 }
3173
3174