1 /*
2 * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/firmware.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30 #include "amdgpu_ras.h"
31
32 #include "soc15.h"
33 #include "gfx_v9_0.h"
34 #include "gfx_v9_4_3.h"
35 #include "gmc_v9_0.h"
36 #include "df_v1_7.h"
37 #include "df_v3_6.h"
38 #include "df_v4_3.h"
39 #include "df_v4_6_2.h"
40 #include "df_v4_15.h"
41 #include "nbio_v6_1.h"
42 #include "nbio_v7_0.h"
43 #include "nbio_v7_4.h"
44 #include "nbio_v7_9.h"
45 #include "nbio_v7_11.h"
46 #include "hdp_v4_0.h"
47 #include "vega10_ih.h"
48 #include "vega20_ih.h"
49 #include "sdma_v4_0.h"
50 #include "sdma_v4_4_2.h"
51 #include "uvd_v7_0.h"
52 #include "vce_v4_0.h"
53 #include "vcn_v1_0.h"
54 #include "vcn_v2_5.h"
55 #include "jpeg_v2_5.h"
56 #include "smuio_v9_0.h"
57 #include "gmc_v10_0.h"
58 #include "gmc_v11_0.h"
59 #include "gmc_v12_0.h"
60 #include "gfxhub_v2_0.h"
61 #include "mmhub_v2_0.h"
62 #include "nbio_v2_3.h"
63 #include "nbio_v4_3.h"
64 #include "nbio_v7_2.h"
65 #include "nbio_v7_7.h"
66 #include "nbif_v6_3_1.h"
67 #include "hdp_v5_0.h"
68 #include "hdp_v5_2.h"
69 #include "hdp_v6_0.h"
70 #include "hdp_v7_0.h"
71 #include "nv.h"
72 #include "soc21.h"
73 #include "soc24.h"
74 #include "soc_v1_0.h"
75 #include "navi10_ih.h"
76 #include "ih_v6_0.h"
77 #include "ih_v6_1.h"
78 #include "ih_v7_0.h"
79 #include "gfx_v10_0.h"
80 #include "gfx_v11_0.h"
81 #include "gfx_v12_0.h"
82 #include "gfx_v12_1.h"
83 #include "sdma_v5_0.h"
84 #include "sdma_v5_2.h"
85 #include "sdma_v6_0.h"
86 #include "sdma_v7_0.h"
87 #include "sdma_v7_1.h"
88 #include "lsdma_v6_0.h"
89 #include "lsdma_v7_0.h"
90 #include "lsdma_v7_1.h"
91 #include "vcn_v2_0.h"
92 #include "jpeg_v2_0.h"
93 #include "vcn_v3_0.h"
94 #include "jpeg_v3_0.h"
95 #include "vcn_v4_0.h"
96 #include "jpeg_v4_0.h"
97 #include "vcn_v4_0_3.h"
98 #include "jpeg_v4_0_3.h"
99 #include "vcn_v4_0_5.h"
100 #include "jpeg_v4_0_5.h"
101 #include "amdgpu_vkms.h"
102 #include "mes_v11_0.h"
103 #include "mes_v12_0.h"
104 #include "mes_v12_1.h"
105 #include "smuio_v11_0.h"
106 #include "smuio_v11_0_6.h"
107 #include "smuio_v13_0.h"
108 #include "smuio_v13_0_3.h"
109 #include "smuio_v13_0_6.h"
110 #include "smuio_v14_0_2.h"
111 #include "smuio_v15_0_0.h"
112 #include "smuio_v15_0_8.h"
113 #include "vcn_v5_0_0.h"
114 #include "vcn_v5_0_1.h"
115 #include "vcn_v5_0_2.h"
116 #include "jpeg_v5_0_0.h"
117 #include "jpeg_v5_0_1.h"
118 #include "jpeg_v5_0_2.h"
119 #include "jpeg_v5_3_0.h"
120
121 #include "amdgpu_ras_mgr.h"
122
123 #include "amdgpu_vpe.h"
124 #if defined(CONFIG_DRM_AMD_ISP)
125 #include "amdgpu_isp.h"
126 #endif
127
128 MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
129 MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
130 MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
131 MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
132 MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
133 MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
134 MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
135 MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
136 MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
137
138 /* Note: These registers are consistent across all the SOCs */
139 #define mmIP_DISCOVERY_VERSION 0x16A00
140 #define mmRCC_CONFIG_MEMSIZE 0xde3
141 #define mmMP0_SMN_C2PMSG_33 0x16061
142 #define mmMM_INDEX 0x0
143 #define mmMM_INDEX_HI 0x6
144 #define mmMM_DATA 0x1
145
146 #define mmDRIVER_SCRATCH_0 0x94
147 #define mmDRIVER_SCRATCH_1 0x95
148 #define mmDRIVER_SCRATCH_2 0x96
149
150 static const char *hw_id_names[HW_ID_MAX] = {
151 [MP1_HWID] = "MP1",
152 [MP2_HWID] = "MP2",
153 [THM_HWID] = "THM",
154 [SMUIO_HWID] = "SMUIO",
155 [FUSE_HWID] = "FUSE",
156 [CLKA_HWID] = "CLKA",
157 [PWR_HWID] = "PWR",
158 [GC_HWID] = "GC",
159 [UVD_HWID] = "UVD",
160 [AUDIO_AZ_HWID] = "AUDIO_AZ",
161 [ACP_HWID] = "ACP",
162 [DCI_HWID] = "DCI",
163 [DMU_HWID] = "DMU",
164 [DCO_HWID] = "DCO",
165 [DIO_HWID] = "DIO",
166 [XDMA_HWID] = "XDMA",
167 [DCEAZ_HWID] = "DCEAZ",
168 [DAZ_HWID] = "DAZ",
169 [SDPMUX_HWID] = "SDPMUX",
170 [NTB_HWID] = "NTB",
171 [IOHC_HWID] = "IOHC",
172 [L2IMU_HWID] = "L2IMU",
173 [VCE_HWID] = "VCE",
174 [MMHUB_HWID] = "MMHUB",
175 [ATHUB_HWID] = "ATHUB",
176 [DBGU_NBIO_HWID] = "DBGU_NBIO",
177 [DFX_HWID] = "DFX",
178 [DBGU0_HWID] = "DBGU0",
179 [DBGU1_HWID] = "DBGU1",
180 [OSSSYS_HWID] = "OSSSYS",
181 [HDP_HWID] = "HDP",
182 [SDMA0_HWID] = "SDMA0",
183 [SDMA1_HWID] = "SDMA1",
184 [SDMA2_HWID] = "SDMA2",
185 [SDMA3_HWID] = "SDMA3",
186 [LSDMA_HWID] = "LSDMA",
187 [ISP_HWID] = "ISP",
188 [DBGU_IO_HWID] = "DBGU_IO",
189 [DF_HWID] = "DF",
190 [CLKB_HWID] = "CLKB",
191 [FCH_HWID] = "FCH",
192 [DFX_DAP_HWID] = "DFX_DAP",
193 [L1IMU_PCIE_HWID] = "L1IMU_PCIE",
194 [L1IMU_NBIF_HWID] = "L1IMU_NBIF",
195 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
196 [L1IMU3_HWID] = "L1IMU3",
197 [L1IMU4_HWID] = "L1IMU4",
198 [L1IMU5_HWID] = "L1IMU5",
199 [L1IMU6_HWID] = "L1IMU6",
200 [L1IMU7_HWID] = "L1IMU7",
201 [L1IMU8_HWID] = "L1IMU8",
202 [L1IMU9_HWID] = "L1IMU9",
203 [L1IMU10_HWID] = "L1IMU10",
204 [L1IMU11_HWID] = "L1IMU11",
205 [L1IMU12_HWID] = "L1IMU12",
206 [L1IMU13_HWID] = "L1IMU13",
207 [L1IMU14_HWID] = "L1IMU14",
208 [L1IMU15_HWID] = "L1IMU15",
209 [WAFLC_HWID] = "WAFLC",
210 [FCH_USB_PD_HWID] = "FCH_USB_PD",
211 [PCIE_HWID] = "PCIE",
212 [PCS_HWID] = "PCS",
213 [DDCL_HWID] = "DDCL",
214 [SST_HWID] = "SST",
215 [IOAGR_HWID] = "IOAGR",
216 [NBIF_HWID] = "NBIF",
217 [IOAPIC_HWID] = "IOAPIC",
218 [SYSTEMHUB_HWID] = "SYSTEMHUB",
219 [NTBCCP_HWID] = "NTBCCP",
220 [UMC_HWID] = "UMC",
221 [SATA_HWID] = "SATA",
222 [USB_HWID] = "USB",
223 [CCXSEC_HWID] = "CCXSEC",
224 [XGMI_HWID] = "XGMI",
225 [XGBE_HWID] = "XGBE",
226 [MP0_HWID] = "MP0",
227 [VPE_HWID] = "VPE",
228 [ATU_HWID] = "ATU",
229 [AIGC_HWID] = "AIGC",
230 };
231
232 static int hw_id_map[MAX_HWIP] = {
233 [GC_HWIP] = GC_HWID,
234 [HDP_HWIP] = HDP_HWID,
235 [SDMA0_HWIP] = SDMA0_HWID,
236 [SDMA1_HWIP] = SDMA1_HWID,
237 [SDMA2_HWIP] = SDMA2_HWID,
238 [SDMA3_HWIP] = SDMA3_HWID,
239 [LSDMA_HWIP] = LSDMA_HWID,
240 [MMHUB_HWIP] = MMHUB_HWID,
241 [ATHUB_HWIP] = ATHUB_HWID,
242 [NBIO_HWIP] = NBIF_HWID,
243 [MP0_HWIP] = MP0_HWID,
244 [MP1_HWIP] = MP1_HWID,
245 [UVD_HWIP] = UVD_HWID,
246 [VCE_HWIP] = VCE_HWID,
247 [DF_HWIP] = DF_HWID,
248 [DCE_HWIP] = DMU_HWID,
249 [OSSSYS_HWIP] = OSSSYS_HWID,
250 [SMUIO_HWIP] = SMUIO_HWID,
251 [PWR_HWIP] = PWR_HWID,
252 [NBIF_HWIP] = NBIF_HWID,
253 [THM_HWIP] = THM_HWID,
254 [CLK_HWIP] = CLKA_HWID,
255 [UMC_HWIP] = UMC_HWID,
256 [XGMI_HWIP] = XGMI_HWID,
257 [DCI_HWIP] = DCI_HWID,
258 [PCIE_HWIP] = PCIE_HWID,
259 [VPE_HWIP] = VPE_HWID,
260 [ISP_HWIP] = ISP_HWID,
261 [ATU_HWIP] = ATU_HWID,
262 };
263
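/* Locate the IP discovery TMR (the reserved memory region holding the
 * discovery binary): wait for IFWI init if needed, derive the region's
 * offset/size from scratch registers, the SR-IOV critical-region table
 * or ACPI, and allocate the CPU-side buffer the table is copied into.
 */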
264 static int amdgpu_discovery_get_tmr_info(struct amdgpu_device *adev,
265 bool *is_tmr_in_sysmem)
266 {
267 u64 vram_size, tmr_offset, tmr_size;
268 u32 msg, tmr_offset_lo, tmr_offset_hi;
269 int i, ret;
270
271 if (!amdgpu_sriov_vf(adev)) {
272 /* It can take up to two seconds for IFWI init to complete on some dGPUs,
273 * but generally it should be in the 60-100ms range. Normally this starts
274 * as soon as the device gets power, so by the time the OS loads it has long
275 * since completed. However, when a card is hotplugged via e.g., USB4, we need to
276 * wait for this to complete. Once the C2PMSG is updated, we can
277 * continue.
278 */
279
280 for (i = 0; i < 2000; i++) {
281 msg = RREG32(mmMP0_SMN_C2PMSG_33);
282 if (msg & 0x80000000)
283 break;
284 msleep(1);
285 }
286 }
287
288 vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
289 if (vram_size == U32_MAX)
290 return -ENXIO;
291 else if (!vram_size)
292 *is_tmr_in_sysmem = true;
293 else
294 *is_tmr_in_sysmem = false;
295
296 /* init the default tmr size and offset */
297 adev->discovery.size = DISCOVERY_TMR_SIZE;
298 if (vram_size)
299 adev->discovery.offset = (vram_size << 20) - DISCOVERY_TMR_OFFSET;
300
301 if (amdgpu_sriov_vf(adev)) {
302 if (adev->virt.is_dynamic_crit_regn_enabled) {
303 adev->discovery.offset =
304 adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset;
305 adev->discovery.size =
306 adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb << 10;
307 if (!adev->discovery.offset || !adev->discovery.size)
308 return -EINVAL;
309 } else {
310 goto out;
311 }
312 } else {
313 tmr_size = RREG32(mmDRIVER_SCRATCH_2);
314 if (tmr_size) {
315 /* It's preferred to transition to PSP mailbox reg interface
316 * for both bare-metal and passthrough if available */
317 adev->discovery.size = (u32)tmr_size;
318 tmr_offset_lo = RREG32(mmDRIVER_SCRATCH_0);
319 tmr_offset_hi = RREG32(mmDRIVER_SCRATCH_1);
320 adev->discovery.offset = ((u64)le32_to_cpu(tmr_offset_hi) << 32 |
321 le32_to_cpu(tmr_offset_lo));
322 } else if (!vram_size) {
323 /* fall back to acpi approach to query tmr offset if vram_size is 0 */
324 ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
325 if (ret)
326 return ret;
327 adev->discovery.size = DISCOVERY_TMR_SIZE;
328 adev->discovery.offset = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
329 }
330 }
331 out:
332 adev->discovery.bin = kzalloc(adev->discovery.size, GFP_KERNEL);
333 if (!adev->discovery.bin)
334 return -ENOMEM;
335 adev->discovery.debugfs_blob.data = adev->discovery.bin;
336 adev->discovery.debugfs_blob.size = adev->discovery.size;
337
338 return 0;
339 }
340
341 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
342 {
343 void *discv_regn;
344
345 /* This region is read-only and reserved from system use */
346 discv_regn = memremap(adev->discovery.offset, adev->discovery.size, MEMREMAP_WC);
347 if (discv_regn) {
348 memcpy(binary, discv_regn, adev->discovery.size);
349 memunmap(discv_regn);
350 return 0;
351 }
352
353 return -ENOENT;
354 }
355
356 #define IP_DISCOVERY_V2 2
357 #define IP_DISCOVERY_V4 4
358
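/* Copy the discovery binary out of the TMR, either through the VRAM
 * aperture or, when the region lives in system memory, via memremap().
 */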
359 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
360 uint8_t *binary,
361 bool is_tmr_in_sysmem)
362 {
363 int ret = 0;
364
365 if (!is_tmr_in_sysmem) {
366 if (amdgpu_sriov_vf(adev) &&
367 amdgpu_sriov_xgmi_connected_to_cpu(adev)) {
368 ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
369 } else {
370 amdgpu_device_vram_access(adev, adev->discovery.offset,
371 (uint32_t *)binary,
372 adev->discovery.size, false);
373 adev->discovery.reserve_tmr = true;
374 }
375 } else {
376 ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
377 }
378
379 return ret;
380 }
381
382 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
383 uint8_t *binary,
384 const char *fw_name)
385 {
386 const struct firmware *fw;
387 int r;
388
389 r = firmware_request_nowarn(&fw, fw_name, adev->dev);
390 if (r) {
391 if (amdgpu_discovery == 2)
392 dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
393 else
394 drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
395 return r;
396 }
397
398 memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
399 release_firmware(fw);
400
401 return 0;
402 }
403
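/* The discovery tables use a simple additive checksum: the unsigned
 * 16-bit sum of every byte in the covered range. For example, a table
 * consisting of bytes { 0x01, 0x02, 0xFF } would yield 0x0102.
 */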
404 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
405 {
406 uint16_t checksum = 0;
407 int i;
408
409 for (i = 0; i < size; i++)
410 checksum += data[i];
411
412 return checksum;
413 }
414
415 static inline bool amdgpu_discovery_verify_checksum(struct amdgpu_device *adev,
416 uint8_t *data, uint32_t size,
417 uint16_t expected)
418 {
419 uint16_t calculated;
420
421 calculated = amdgpu_discovery_calculate_checksum(data, size);
422
423 if (calculated != expected) {
424 dev_err(adev->dev, "Discovery checksum failed: calc 0x%04x != exp 0x%04x, size %u.\n",
425 calculated, expected, size);
426 return false;
427 }
428
429 return true;
430 }
431
432 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
433 {
434 struct binary_header *bhdr;
435 bhdr = (struct binary_header *)binary;
436
437 return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
438 }
439
440 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
441 {
442 /*
443 * So far, apply this quirk only on those Navy Flounder boards which
444 * have a bad VCN config in their harvest table.
445 */
446 if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
447 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
448 switch (adev->pdev->revision) {
449 case 0xC1:
450 case 0xC2:
451 case 0xC3:
452 case 0xC5:
453 case 0xC7:
454 case 0xCF:
455 case 0xDF:
456 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
457 adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
458 break;
459 default:
460 break;
461 }
462 }
463 }
464
465 static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
466 struct table_info *info)
467 {
468 uint8_t *discovery_bin = adev->discovery.bin;
469 uint16_t checksum;
470 uint16_t offset;
471
472 offset = le16_to_cpu(info->offset);
473 checksum = le16_to_cpu(info->checksum);
474
475 struct nps_info_header *nhdr =
476 (struct nps_info_header *)(discovery_bin + offset);
477
478 if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
479 dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
480 return -EINVAL;
481 }
482
483 if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset,
484 le32_to_cpu(nhdr->size_bytes),
485 checksum)) {
486 dev_dbg(adev->dev, "invalid nps info data table checksum\n");
487 return -EINVAL;
488 }
489
490 return 0;
491 }
492
493 static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
494 {
495 if (amdgpu_discovery == 2) {
496 /* Assume there is a valid discovery TMR in VRAM even if the binary is sideloaded */
497 adev->discovery.reserve_tmr = true;
498 return "amdgpu/ip_discovery.bin";
499 }
500
501 switch (adev->asic_type) {
502 case CHIP_VEGA10:
503 return "amdgpu/vega10_ip_discovery.bin";
504 case CHIP_VEGA12:
505 return "amdgpu/vega12_ip_discovery.bin";
506 case CHIP_RAVEN:
507 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
508 return "amdgpu/raven2_ip_discovery.bin";
509 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
510 return "amdgpu/picasso_ip_discovery.bin";
511 else
512 return "amdgpu/raven_ip_discovery.bin";
513 case CHIP_VEGA20:
514 return "amdgpu/vega20_ip_discovery.bin";
515 case CHIP_ARCTURUS:
516 return "amdgpu/arcturus_ip_discovery.bin";
517 case CHIP_ALDEBARAN:
518 return "amdgpu/aldebaran_ip_discovery.bin";
519 default:
520 return NULL;
521 }
522 }
523
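/* Look up the table_info entry (offset/size/checksum) for the given
 * table id, handling both the v0/v1 and v2 binary header layouts.
 */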
524 static int amdgpu_discovery_get_table_info(struct amdgpu_device *adev,
525 struct table_info **info,
526 uint16_t table_id)
527 {
528 struct binary_header *bhdr =
529 (struct binary_header *)adev->discovery.bin;
530 struct binary_header_v2 *bhdrv2;
531
532 switch (bhdr->version_major) {
533 case 2:
534 bhdrv2 = (struct binary_header_v2 *)adev->discovery.bin;
535 *info = &bhdrv2->table_list[table_id];
536 break;
537 case 1:
538 case 0:
539 *info = &bhdr->table_list[table_id];
540 break;
541 default:
542 dev_err(adev->dev, "Invalid ip discovery table version %d\n", bhdr->version_major);
543 return -EINVAL;
544 }
545
546 return 0;
547 }
548
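/* Validate a single sub-table: locate its header, compare the signature
 * or table id against the expected value and verify its checksum.
 */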
549 static int amdgpu_discovery_table_check(struct amdgpu_device *adev,
550 uint8_t *discovery_bin,
551 uint16_t table_id)
552 {
553 int r, act_val, exp_val, table_size;
554 uint16_t offset, checksum;
555 struct table_info *info;
556 bool check_table = true;
557 char *table_name;
558
559 r = amdgpu_discovery_get_table_info(adev, &info, table_id);
560 if (r)
561 return r;
562 offset = le16_to_cpu(info->offset);
563 checksum = le16_to_cpu(info->checksum);
564
565 switch (table_id) {
566 case IP_DISCOVERY: {
567 struct ip_discovery_header *ihdr =
568 (struct ip_discovery_header *)(discovery_bin + offset);
569 act_val = le32_to_cpu(ihdr->signature);
570 exp_val = DISCOVERY_TABLE_SIGNATURE;
571 table_size = le16_to_cpu(ihdr->size);
572 table_name = "data table";
573 break;
574 }
575 case GC: {
576 struct gpu_info_header *ghdr =
577 (struct gpu_info_header *)(discovery_bin + offset);
578 act_val = le32_to_cpu(ghdr->table_id);
579 exp_val = GC_TABLE_ID;
580 table_size = le16_to_cpu(ghdr->size);
581 table_name = "gc table";
582 break;
583 }
584 case HARVEST_INFO: {
585 struct harvest_info_header *hhdr =
586 (struct harvest_info_header *)(discovery_bin + offset);
587 act_val = le32_to_cpu(hhdr->signature);
588 exp_val = HARVEST_TABLE_SIGNATURE;
589 table_size = sizeof(struct harvest_table);
590 table_name = "harvest table";
591 break;
592 }
593 case VCN_INFO: {
594 struct vcn_info_header *vhdr =
595 (struct vcn_info_header *)(discovery_bin + offset);
596 act_val = le32_to_cpu(vhdr->table_id);
597 exp_val = VCN_INFO_TABLE_ID;
598 table_size = le32_to_cpu(vhdr->size_bytes);
599 table_name = "vcn table";
600 break;
601 }
602 case MALL_INFO: {
603 struct mall_info_header *mhdr =
604 (struct mall_info_header *)(discovery_bin + offset);
605 act_val = le32_to_cpu(mhdr->table_id);
606 exp_val = MALL_INFO_TABLE_ID;
607 table_size = le32_to_cpu(mhdr->size_bytes);
608 table_name = "mall table";
609 check_table = false;
610 break;
611 }
612 default:
613 dev_err(adev->dev, "invalid ip discovery table id %d specified\n", table_id);
614 check_table = false;
615 break;
616 }
617
618 if (check_table && offset) {
619 if (act_val != exp_val) {
620 dev_err(adev->dev, "invalid ip discovery %s signature\n", table_name);
621 return -EINVAL;
622 }
623
624 if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset,
625 table_size, checksum)) {
626 dev_err(adev->dev, "invalid ip discovery %s checksum\n", table_name);
627 return -EINVAL;
628 }
629 }
630
631 return 0;
632 }
633
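/* Top-level discovery setup: locate and read the binary (from file or
 * from the TMR), then validate the binary signature, binary checksum
 * and every known sub-table before the data is consumed.
 */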
634 static int amdgpu_discovery_init(struct amdgpu_device *adev)
635 {
636 struct binary_header *bhdr;
637 uint8_t *discovery_bin;
638 const char *fw_name;
639 uint16_t offset;
640 uint16_t size;
641 uint16_t checksum;
642 uint16_t table_id;
643 bool is_tmr_in_sysmem;
644 int r;
645
646 r = amdgpu_discovery_get_tmr_info(adev, &is_tmr_in_sysmem);
647 if (r)
648 return r;
649
650 discovery_bin = adev->discovery.bin;
651 /* Read from file if it is the preferred option */
652 fw_name = amdgpu_discovery_get_fw_name(adev);
653 if (fw_name != NULL) {
654 drm_dbg(&adev->ddev, "use ip discovery information from file");
655 r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin,
656 fw_name);
657 if (r)
658 goto out;
659 } else {
660 drm_dbg(&adev->ddev, "use ip discovery information from memory");
661 r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin,
662 is_tmr_in_sysmem);
663 if (r)
664 goto out;
665 }
666
667 /* check the ip discovery binary signature */
668 if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) {
669 dev_err(adev->dev,
670 "get invalid ip discovery binary signature\n");
671 r = -EINVAL;
672 goto out;
673 }
674
675 bhdr = (struct binary_header *)discovery_bin;
676
677 offset = offsetof(struct binary_header, binary_checksum) +
678 sizeof(bhdr->binary_checksum);
679 size = le16_to_cpu(bhdr->binary_size) - offset;
680 checksum = le16_to_cpu(bhdr->binary_checksum);
681
682 if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset, size,
683 checksum)) {
684 dev_err(adev->dev, "invalid ip discovery binary checksum\n");
685 r = -EINVAL;
686 goto out;
687 }
688
689 for (table_id = 0; table_id <= MALL_INFO; table_id++) {
690 r = amdgpu_discovery_table_check(adev, discovery_bin, table_id);
691 if (r)
692 goto out;
693 }
694
695 return 0;
696
697 out:
698 kfree(adev->discovery.bin);
699 adev->discovery.bin = NULL;
700 if ((amdgpu_discovery != 2) &&
701 (RREG32(mmIP_DISCOVERY_VERSION) == 4))
702 amdgpu_ras_query_boot_status(adev, 4);
703 return r;
704 }
705
706 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
707
708 void amdgpu_discovery_fini(struct amdgpu_device *adev)
709 {
710 amdgpu_discovery_sysfs_fini(adev);
711 kfree(adev->discovery.bin);
712 adev->discovery.bin = NULL;
713 }
714
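/* Basic sanity check on an IP entry parsed from the discovery blob:
 * reject out-of-range instance numbers and hardware ids.
 */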
715 static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
716 uint8_t instance, uint16_t hw_id)
717 {
718 if (instance >= HWIP_MAX_INSTANCE) {
719 dev_err(adev->dev,
720 "Unexpected instance_number (%d) from ip discovery blob\n",
721 instance);
722 return -EINVAL;
723 }
724 if (hw_id >= HW_ID_MAX) {
725 dev_err(adev->dev,
726 "Unexpected hw_id (%d) from ip discovery blob\n",
727 hw_id);
728 return -EINVAL;
729 }
730
731 return 0;
732 }
733
734 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
735 uint32_t *vcn_harvest_count)
736 {
737 uint8_t *discovery_bin = adev->discovery.bin;
738 struct binary_header *bhdr;
739 struct ip_discovery_header *ihdr;
740 struct die_header *dhdr;
741 struct ip *ip;
742 uint16_t die_offset, ip_offset, num_dies, num_ips;
743 uint16_t hw_id;
744 uint8_t inst;
745 int i, j;
746
747 bhdr = (struct binary_header *)discovery_bin;
748 ihdr = (struct ip_discovery_header
749 *)(discovery_bin +
750 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
751 num_dies = le16_to_cpu(ihdr->num_dies);
752
753 /* scan harvest bit of all IP data structures */
754 for (i = 0; i < num_dies; i++) {
755 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
756 dhdr = (struct die_header *)(discovery_bin + die_offset);
757 num_ips = le16_to_cpu(dhdr->num_ips);
758 ip_offset = die_offset + sizeof(*dhdr);
759
760 for (j = 0; j < num_ips; j++) {
761 ip = (struct ip *)(discovery_bin + ip_offset);
762 inst = ip->number_instance;
763 hw_id = le16_to_cpu(ip->hw_id);
764 if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
765 goto next_ip;
766
767 if (ip->harvest == 1) {
768 switch (hw_id) {
769 case VCN_HWID:
770 (*vcn_harvest_count)++;
771 if (inst == 0) {
772 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
773 adev->vcn.inst_mask &=
774 ~AMDGPU_VCN_HARVEST_VCN0;
775 adev->jpeg.inst_mask &=
776 ~AMDGPU_VCN_HARVEST_VCN0;
777 } else {
778 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
779 adev->vcn.inst_mask &=
780 ~AMDGPU_VCN_HARVEST_VCN1;
781 adev->jpeg.inst_mask &=
782 ~AMDGPU_VCN_HARVEST_VCN1;
783 }
784 break;
785 case DMU_HWID:
786 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
787 break;
788 default:
789 break;
790 }
791 }
792 next_ip:
793 ip_offset += struct_size(ip, base_address,
794 ip->num_base_address);
795 }
796 }
797 }
798
799 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
800 uint32_t *vcn_harvest_count,
801 uint32_t *umc_harvest_count)
802 {
803 uint8_t *discovery_bin = adev->discovery.bin;
804 struct table_info *info;
805 struct harvest_table *harvest_info;
806 u16 offset;
807 int i;
808 u64 umc_harvest_config = 0;
809
810 if (amdgpu_discovery_get_table_info(adev, &info, HARVEST_INFO))
811 return;
812 offset = le16_to_cpu(info->offset);
813
814 if (!offset) {
815 dev_err(adev->dev, "invalid harvest table offset\n");
816 return;
817 }
818
819 harvest_info = (struct harvest_table *)(discovery_bin + offset);
820
821 for (i = 0; i < 32; i++) {
822 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
823 break;
824
825 switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
826 case VCN_HWID:
827 (*vcn_harvest_count)++;
828 adev->vcn.harvest_config |=
829 (1 << harvest_info->list[i].number_instance);
830 adev->jpeg.harvest_config |=
831 (1 << harvest_info->list[i].number_instance);
832
833 adev->vcn.inst_mask &=
834 ~(1U << harvest_info->list[i].number_instance);
835 adev->jpeg.inst_mask &=
836 ~(1U << harvest_info->list[i].number_instance);
837 break;
838 case DMU_HWID:
839 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
840 break;
841 case UMC_HWID:
842 umc_harvest_config |=
843 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
844 (*umc_harvest_count)++;
845 break;
846 case GC_HWID:
847 adev->gfx.xcc_mask &=
848 ~(1U << harvest_info->list[i].number_instance);
849 break;
850 case SDMA0_HWID:
851 adev->sdma.sdma_mask &=
852 ~(1U << harvest_info->list[i].number_instance);
853 break;
854 #if defined(CONFIG_DRM_AMD_ISP)
855 case ISP_HWID:
856 adev->isp.harvest_config |=
857 ~(1U << harvest_info->list[i].number_instance);
858 break;
859 #endif
860 default:
861 break;
862 }
863 }
864
865 adev->umc.active_mask = ((1ULL << adev->umc.node_inst_num) - 1ULL) &
866 ~umc_harvest_config;
867 }
868
869 /* ================================================== */
870
871 struct ip_hw_instance {
872 struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
873
874 int hw_id;
875 u8 num_instance;
876 u8 major, minor, revision;
877 u8 harvest;
878
879 int num_base_addresses;
880 u32 base_addr[] __counted_by(num_base_addresses);
881 };
882
883 struct ip_hw_id {
884 struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
885 int hw_id;
886 };
887
888 struct ip_die_entry {
889 struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
890 u16 num_ips;
891 };
892
893 /* -------------------------------------------------- */
894
895 struct ip_hw_instance_attr {
896 struct attribute attr;
897 ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
898 };
899
900 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
901 {
902 return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
903 }
904
905 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
906 {
907 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
908 }
909
910 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
911 {
912 return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
913 }
914
915 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
916 {
917 return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
918 }
919
920 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
921 {
922 return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
923 }
924
925 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
926 {
927 return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
928 }
929
930 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
931 {
932 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
933 }
934
935 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
936 {
937 ssize_t at;
938 int ii;
939
940 for (at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
941 /* Here we ensure that at + size <= PAGE_SIZE.
942 */
943 if (at + 12 > PAGE_SIZE)
944 break;
945 at += sysfs_emit_at(buf, at, "0x%08X\n",
946 ip_hw_instance->base_addr[ii]);
947 }
948
949 return at;
950 }
951
952 static struct ip_hw_instance_attr ip_hw_attr[] = {
953 __ATTR_RO(hw_id),
954 __ATTR_RO(num_instance),
955 __ATTR_RO(major),
956 __ATTR_RO(minor),
957 __ATTR_RO(revision),
958 __ATTR_RO(harvest),
959 __ATTR_RO(num_base_addresses),
960 __ATTR_RO(base_addr),
961 };
962
963 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
964 ATTRIBUTE_GROUPS(ip_hw_instance);
965
966 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
967 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
968
969 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
970 struct attribute *attr,
971 char *buf)
972 {
973 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
974 struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
975
976 if (!ip_hw_attr->show)
977 return -EIO;
978
979 return ip_hw_attr->show(ip_hw_instance, buf);
980 }
981
982 static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
983 .show = ip_hw_instance_attr_show,
984 };
985
986 static void ip_hw_instance_release(struct kobject *kobj)
987 {
988 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
989
990 kfree(ip_hw_instance);
991 }
992
993 static const struct kobj_type ip_hw_instance_ktype = {
994 .release = ip_hw_instance_release,
995 .sysfs_ops = &ip_hw_instance_sysfs_ops,
996 .default_groups = ip_hw_instance_groups,
997 };
998
999 /* -------------------------------------------------- */
1000
1001 #define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
1002
1003 static void ip_hw_id_release(struct kobject *kobj)
1004 {
1005 struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
1006
1007 if (!list_empty(&ip_hw_id->hw_id_kset.list))
1008 DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
1009 kfree(ip_hw_id);
1010 }
1011
1012 static const struct kobj_type ip_hw_id_ktype = {
1013 .release = ip_hw_id_release,
1014 .sysfs_ops = &kobj_sysfs_ops,
1015 };
1016
1017 /* -------------------------------------------------- */
1018
1019 static void die_kobj_release(struct kobject *kobj);
1020 static void ip_disc_release(struct kobject *kobj);
1021
1022 struct ip_die_entry_attribute {
1023 struct attribute attr;
1024 ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
1025 };
1026
1027 #define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
1028
1029 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
1030 {
1031 return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
1032 }
1033
1034 /* If there are more ip_die_entry attrs than just the number of IPs,
1035 * we can make this into an array of attrs, and then initialize
1036 * ip_die_entry_attrs in a loop.
1037 */
1038 static struct ip_die_entry_attribute num_ips_attr =
1039 __ATTR_RO(num_ips);
1040
1041 static struct attribute *ip_die_entry_attrs[] = {
1042 &num_ips_attr.attr,
1043 NULL,
1044 };
1045 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
1046
1047 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
1048
1049 static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
1050 struct attribute *attr,
1051 char *buf)
1052 {
1053 struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
1054 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
1055
1056 if (!ip_die_entry_attr->show)
1057 return -EIO;
1058
1059 return ip_die_entry_attr->show(ip_die_entry, buf);
1060 }
1061
1062 static void ip_die_entry_release(struct kobject *kobj)
1063 {
1064 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
1065
1066 if (!list_empty(&ip_die_entry->ip_kset.list))
1067 DRM_ERROR("ip_die_entry->ip_kset is not empty");
1068 kfree(ip_die_entry);
1069 }
1070
1071 static const struct sysfs_ops ip_die_entry_sysfs_ops = {
1072 .show = ip_die_entry_attr_show,
1073 };
1074
1075 static const struct kobj_type ip_die_entry_ktype = {
1076 .release = ip_die_entry_release,
1077 .sysfs_ops = &ip_die_entry_sysfs_ops,
1078 .default_groups = ip_die_entry_groups,
1079 };
1080
1081 static const struct kobj_type die_kobj_ktype = {
1082 .release = die_kobj_release,
1083 .sysfs_ops = &kobj_sysfs_ops,
1084 };
1085
1086 static const struct kobj_type ip_discovery_ktype = {
1087 .release = ip_disc_release,
1088 .sysfs_ops = &kobj_sysfs_ops,
1089 };
1090
1091 struct ip_discovery_top {
1092 struct kobject kobj; /* ip_discovery/ */
1093 struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
1094 struct amdgpu_device *adev;
1095 };
1096
1097 static void die_kobj_release(struct kobject *kobj)
1098 {
1099 struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
1100 struct ip_discovery_top,
1101 die_kset);
1102 if (!list_empty(&ip_top->die_kset.list))
1103 DRM_ERROR("ip_top->die_kset is not empty");
1104 }
1105
1106 static void ip_disc_release(struct kobject *kobj)
1107 {
1108 struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
1109 kobj);
1110 struct amdgpu_device *adev = ip_top->adev;
1111
1112 kfree(ip_top);
1113 adev->discovery.ip_top = NULL;
1114 }
1115
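/* Derive the harvest state for a given hw_id/instance from the masks
 * populated while parsing the discovery and harvest tables.
 */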
1116 static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
1117 uint16_t hw_id, uint8_t inst)
1118 {
1119 uint8_t harvest = 0;
1120
1121 /* Until a uniform way is figured out, get the mask based on hwid */
1122 switch (hw_id) {
1123 case VCN_HWID:
1124 /* VCN vs UVD+VCE */
1125 if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
1126 harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
1127 break;
1128 case DMU_HWID:
1129 if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
1130 harvest = 0x1;
1131 break;
1132 case UMC_HWID:
1133 /* TODO: It needs separate parsing; ignore for now. */
1134 break;
1135 case GC_HWID:
1136 harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
1137 break;
1138 case SDMA0_HWID:
1139 harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
1140 break;
1141 default:
1142 break;
1143 }
1144
1145 return harvest;
1146 }
1147
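/* Populate ip_discovery/die/#die/#hw_id/#instance/ for one die: group
 * IP entries by hardware id, register a kset per hw_id and a kobject
 * per instance carrying the version, harvest and base address attrs.
 */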
1148 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
1149 struct ip_die_entry *ip_die_entry,
1150 const size_t _ip_offset, const int num_ips,
1151 bool reg_base_64)
1152 {
1153 uint8_t *discovery_bin = adev->discovery.bin;
1154 int ii, jj, kk, res;
1155 uint16_t hw_id;
1156 uint8_t inst;
1157
1158 DRM_DEBUG("num_ips:%d", num_ips);
1159
1160 /* Find all IPs of a given HW ID, and add their instance to
1161 * #die/#hw_id/#instance/<attributes>
1162 */
1163 for (ii = 0; ii < HW_ID_MAX; ii++) {
1164 struct ip_hw_id *ip_hw_id = NULL;
1165 size_t ip_offset = _ip_offset;
1166
1167 for (jj = 0; jj < num_ips; jj++) {
1168 struct ip_v4 *ip;
1169 struct ip_hw_instance *ip_hw_instance;
1170
1171 ip = (struct ip_v4 *)(discovery_bin + ip_offset);
1172 inst = ip->instance_number;
1173 hw_id = le16_to_cpu(ip->hw_id);
1174 if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
1175 hw_id != ii)
1176 goto next_ip;
1177
1178 DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
1179
1180 /* We have a hw_id match; register the hw
1181 * block if not yet registered.
1182 */
1183 if (!ip_hw_id) {
1184 ip_hw_id = kzalloc_obj(*ip_hw_id);
1185 if (!ip_hw_id)
1186 return -ENOMEM;
1187 ip_hw_id->hw_id = ii;
1188
1189 kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
1190 ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
1191 ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
1192 res = kset_register(&ip_hw_id->hw_id_kset);
1193 if (res) {
1194 DRM_ERROR("Couldn't register ip_hw_id kset");
1195 kfree(ip_hw_id);
1196 return res;
1197 }
1198 if (hw_id_names[ii]) {
1199 res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
1200 &ip_hw_id->hw_id_kset.kobj,
1201 hw_id_names[ii]);
1202 if (res) {
1203 DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
1204 hw_id_names[ii],
1205 kobject_name(&ip_die_entry->ip_kset.kobj));
1206 }
1207 }
1208 }
1209
1210 /* Now register its instance.
1211 */
1212 ip_hw_instance = kzalloc_flex(*ip_hw_instance,
1213 base_addr,
1214 ip->num_base_address);
1215 if (!ip_hw_instance) {
1216 DRM_ERROR("no memory for ip_hw_instance");
1217 return -ENOMEM;
1218 }
1219 ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1220 ip_hw_instance->num_instance = ip->instance_number;
1221 ip_hw_instance->major = ip->major;
1222 ip_hw_instance->minor = ip->minor;
1223 ip_hw_instance->revision = ip->revision;
1224 ip_hw_instance->harvest =
1225 amdgpu_discovery_get_harvest_info(
1226 adev, ip_hw_instance->hw_id,
1227 ip_hw_instance->num_instance);
1228 ip_hw_instance->num_base_addresses = ip->num_base_address;
1229
1230 for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++)
1231 ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1232
1233 kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1234 ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1235 res = kobject_add(&ip_hw_instance->kobj, NULL,
1236 "%d", ip_hw_instance->num_instance);
1237 next_ip:
1238 if (reg_base_64)
1239 ip_offset += struct_size(ip, base_address_64,
1240 ip->num_base_address);
1241 else
1242 ip_offset += struct_size(ip, base_address,
1243 ip->num_base_address);
1244 }
1245 }
1246
1247 return 0;
1248 }
1249
1250 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1251 {
1252 struct ip_discovery_top *ip_top = adev->discovery.ip_top;
1253 uint8_t *discovery_bin = adev->discovery.bin;
1254 struct table_info *info;
1255 struct ip_discovery_header *ihdr;
1256 struct die_header *dhdr;
1257 struct kset *die_kset = &ip_top->die_kset;
1258 u16 num_dies, die_offset, num_ips;
1259 size_t ip_offset;
1260 int ii, res;
1261
1262 res = amdgpu_discovery_get_table_info(adev, &info, IP_DISCOVERY);
1263 if (res)
1264 return res;
1265 ihdr = (struct ip_discovery_header
1266 *)(discovery_bin +
1267 le16_to_cpu(info->offset));
1268 num_dies = le16_to_cpu(ihdr->num_dies);
1269
1270 DRM_DEBUG("number of dies: %d\n", num_dies);
1271
1272 for (ii = 0; ii < num_dies; ii++) {
1273 struct ip_die_entry *ip_die_entry;
1274
1275 die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1276 dhdr = (struct die_header *)(discovery_bin + die_offset);
1277 num_ips = le16_to_cpu(dhdr->num_ips);
1278 ip_offset = die_offset + sizeof(*dhdr);
1279
1280 /* Add the die to the kset.
1281 *
1282 * dhdr->die_id == ii, which was checked in
1283 * amdgpu_discovery_reg_base_init().
1284 */
1285
1286 ip_die_entry = kzalloc_obj(*ip_die_entry);
1287 if (!ip_die_entry)
1288 return -ENOMEM;
1289
1290 ip_die_entry->num_ips = num_ips;
1291
1292 kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1293 ip_die_entry->ip_kset.kobj.kset = die_kset;
1294 ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1295 res = kset_register(&ip_die_entry->ip_kset);
1296 if (res) {
1297 DRM_ERROR("Couldn't register ip_die_entry kset");
1298 kfree(ip_die_entry);
1299 return res;
1300 }
1301
1302 amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1303 }
1304
1305 return 0;
1306 }
1307
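/* Create the ip_discovery/ sysfs hierarchy under the device kobject
 * and recurse over the dies described in the discovery binary.
 */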
1308 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1309 {
1310 uint8_t *discovery_bin = adev->discovery.bin;
1311 struct ip_discovery_top *ip_top;
1312 struct kset *die_kset;
1313 int res, ii;
1314
1315 if (!discovery_bin)
1316 return -EINVAL;
1317
1318 ip_top = kzalloc_obj(*ip_top);
1319 if (!ip_top)
1320 return -ENOMEM;
1321
1322 ip_top->adev = adev;
1323 adev->discovery.ip_top = ip_top;
1324 res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype,
1325 &adev->dev->kobj, "ip_discovery");
1326 if (res) {
1327 DRM_ERROR("Couldn't init and add ip_discovery/");
1328 goto Err;
1329 }
1330
1331 die_kset = &ip_top->die_kset;
1332 kobject_set_name(&die_kset->kobj, "%s", "die");
1333 die_kset->kobj.parent = &ip_top->kobj;
1334 die_kset->kobj.ktype = &die_kobj_ktype;
1335 res = kset_register(&ip_top->die_kset);
1336 if (res) {
1337 DRM_ERROR("Couldn't register die_kset");
1338 goto Err;
1339 }
1340
1341 for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1342 ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1343 ip_hw_instance_attrs[ii] = NULL;
1344
1345 res = amdgpu_discovery_sysfs_recurse(adev);
1346
1347 return res;
1348 Err:
1349 kobject_put(&ip_top->kobj);
1350 return res;
1351 }
1352
1353 /* -------------------------------------------------- */
1354
1355 #define list_to_kobj(el) container_of(el, struct kobject, entry)
1356
1357 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1358 {
1359 struct list_head *el, *tmp;
1360 struct kset *hw_id_kset;
1361
1362 hw_id_kset = &ip_hw_id->hw_id_kset;
1363 spin_lock(&hw_id_kset->list_lock);
1364 list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1365 list_del_init(el);
1366 spin_unlock(&hw_id_kset->list_lock);
1367 /* kobject is embedded in ip_hw_instance */
1368 kobject_put(list_to_kobj(el));
1369 spin_lock(&hw_id_kset->list_lock);
1370 }
1371 spin_unlock(&hw_id_kset->list_lock);
1372 kobject_put(&ip_hw_id->hw_id_kset.kobj);
1373 }
1374
1375 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1376 {
1377 struct list_head *el, *tmp;
1378 struct kset *ip_kset;
1379
1380 ip_kset = &ip_die_entry->ip_kset;
1381 spin_lock(&ip_kset->list_lock);
1382 list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1383 list_del_init(el);
1384 spin_unlock(&ip_kset->list_lock);
1385 amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1386 spin_lock(&ip_kset->list_lock);
1387 }
1388 spin_unlock(&ip_kset->list_lock);
1389 kobject_put(&ip_die_entry->ip_kset.kobj);
1390 }
1391
1392 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1393 {
1394 struct ip_discovery_top *ip_top = adev->discovery.ip_top;
1395 struct list_head *el, *tmp;
1396 struct kset *die_kset;
1397
1398 if (!ip_top)
1399 return;
1400
1401 die_kset = &ip_top->die_kset;
1402 spin_lock(&die_kset->list_lock);
1403 list_for_each_prev_safe(el, tmp, &die_kset->list) {
1404 list_del_init(el);
1405 spin_unlock(&die_kset->list_lock);
1406 amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1407 spin_lock(&die_kset->list_lock);
1408 }
1409 spin_unlock(&die_kset->list_lock);
1410 kobject_put(&ip_top->die_kset.kobj);
1411 kobject_put(&ip_top->kobj);
1412 }
1413
1414 /* devcoredump support */
1415 void amdgpu_discovery_dump(struct amdgpu_device *adev, struct drm_printer *p)
1416 {
1417 struct ip_discovery_top *ip_top = adev->discovery.ip_top;
1418 struct ip_die_entry *ip_die_entry;
1419 struct list_head *el_die, *el_hw_id, *el_hw_inst;
1420 struct ip_hw_id *hw_id;
1421 struct kset *die_kset;
1422 struct ip_hw_instance *ip_inst;
1423 int i = 0, j;
1424
1425 if (!ip_top)
1426 return;
1427
1428 die_kset = &ip_top->die_kset;
1429
1430 drm_printf(p, "\nHW IP Discovery\n");
1431
1432 spin_lock(&die_kset->list_lock);
1433 list_for_each(el_die, &die_kset->list) {
1434 drm_printf(p, "die %d\n", i++);
1435 ip_die_entry = to_ip_die_entry(list_to_kobj(el_die));
1436
1437 list_for_each(el_hw_id, &ip_die_entry->ip_kset.list) {
1438 hw_id = to_ip_hw_id(list_to_kobj(el_hw_id));
1439 drm_printf(p, "hw_id %d %s\n", hw_id->hw_id, hw_id_names[hw_id->hw_id]);
1440
1441 list_for_each(el_hw_inst, &hw_id->hw_id_kset.list) {
1442 ip_inst = to_ip_hw_instance(list_to_kobj(el_hw_inst));
1443 drm_printf(p, "\tinstance %d\n", ip_inst->num_instance);
1444 drm_printf(p, "\tmajor %d\n", ip_inst->major);
1445 drm_printf(p, "\tminor %d\n", ip_inst->minor);
1446 drm_printf(p, "\trevision %d\n", ip_inst->revision);
1447 drm_printf(p, "\tharvest 0x%01X\n", ip_inst->harvest);
1448 drm_printf(p, "\tnum_base_addresses %d\n",
1449 ip_inst->num_base_addresses);
1450 for (j = 0; j < ip_inst->num_base_addresses; j++)
1451 drm_printf(p, "\tbase_addr[%d] 0x%08X\n",
1452 j, ip_inst->base_addr[j]);
1453 }
1454 }
1455 }
1456 spin_unlock(&die_kset->list_lock);
1457 }
1458
1459
1460 /* ================================================== */
1461
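/* Walk the IP discovery table and, for every recognized hardware IP,
 * record its register base addresses in adev->reg_offset[] and its
 * version triplet (plus variant/sub-revision for v3+ headers) in
 * adev->ip_versions[], while counting VCN/SDMA/VPE/UMC instances.
 */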
1462 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1463 {
1464 uint8_t num_base_address, subrev, variant;
1465 struct table_info *info;
1466 struct ip_discovery_header *ihdr;
1467 struct die_header *dhdr;
1468 uint8_t *discovery_bin;
1469 struct ip_v4 *ip;
1470 uint16_t die_offset;
1471 uint16_t ip_offset;
1472 uint16_t num_dies;
1473 uint32_t wafl_ver;
1474 uint16_t num_ips;
1475 uint16_t hw_id;
1476 uint8_t inst;
1477 int hw_ip;
1478 int i, j, k;
1479 int r;
1480
1481 r = amdgpu_discovery_init(adev);
1482 if (r)
1483 return r;
1484 discovery_bin = adev->discovery.bin;
1485 wafl_ver = 0;
1486 adev->gfx.xcc_mask = 0;
1487 adev->sdma.sdma_mask = 0;
1488 adev->vcn.inst_mask = 0;
1489 adev->jpeg.inst_mask = 0;
1490 r = amdgpu_discovery_get_table_info(adev, &info, IP_DISCOVERY);
1491 if (r)
1492 return r;
1493 ihdr = (struct ip_discovery_header
1494 *)(discovery_bin +
1495 le16_to_cpu(info->offset));
1496 num_dies = le16_to_cpu(ihdr->num_dies);
1497
1498 DRM_DEBUG("number of dies: %d\n", num_dies);
1499
1500 for (i = 0; i < num_dies; i++) {
1501 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1502 dhdr = (struct die_header *)(discovery_bin + die_offset);
1503 num_ips = le16_to_cpu(dhdr->num_ips);
1504 ip_offset = die_offset + sizeof(*dhdr);
1505
1506 if (le16_to_cpu(dhdr->die_id) != i) {
1507 DRM_ERROR("invalid die id %d, expected %d\n",
1508 le16_to_cpu(dhdr->die_id), i);
1509 return -EINVAL;
1510 }
1511
1512 DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1513 le16_to_cpu(dhdr->die_id), num_ips);
1514
1515 for (j = 0; j < num_ips; j++) {
1516 ip = (struct ip_v4 *)(discovery_bin + ip_offset);
1517
1518 inst = ip->instance_number;
1519 hw_id = le16_to_cpu(ip->hw_id);
1520 if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
1521 goto next_ip;
1522
1523 num_base_address = ip->num_base_address;
1524
1525 DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1526 hw_id_names[le16_to_cpu(ip->hw_id)],
1527 le16_to_cpu(ip->hw_id),
1528 ip->instance_number,
1529 ip->major, ip->minor,
1530 ip->revision);
1531
1532 if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1533 /* Bit [5:0]: original revision value
1534 * Bit [7:6]: en/decode capability:
1535 * 0b00 : VCN function normally
1536 * 0b10 : encode is disabled
1537 * 0b01 : decode is disabled
1538 */
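/* For example, a raw revision of 0x83 (0b10000011) means original
 * revision 3 with encode disabled; the 0xc0 bits are saved in
 * vcn_config below and then cleared from ip->revision.
 */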
1539 if (adev->vcn.num_vcn_inst <
1540 AMDGPU_MAX_VCN_INSTANCES) {
1541 adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
1542 ip->revision & 0xc0;
1543 adev->vcn.num_vcn_inst++;
1544 adev->vcn.inst_mask |=
1545 (1U << ip->instance_number);
1546 adev->jpeg.inst_mask |=
1547 (1U << ip->instance_number);
1548 } else {
1549 dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1550 adev->vcn.num_vcn_inst + 1,
1551 AMDGPU_MAX_VCN_INSTANCES);
1552 }
1553 ip->revision &= ~0xc0;
1554 }
1555 if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1556 le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1557 le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1558 le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1559 if (adev->sdma.num_instances <
1560 AMDGPU_MAX_SDMA_INSTANCES) {
1561 adev->sdma.num_instances++;
1562 adev->sdma.sdma_mask |=
1563 (1U << ip->instance_number);
1564 } else {
1565 dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1566 adev->sdma.num_instances + 1,
1567 AMDGPU_MAX_SDMA_INSTANCES);
1568 }
1569 }
1570
1571 if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
1572 if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
1573 adev->vpe.num_instances++;
1574 else
1575 dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
1576 adev->vpe.num_instances + 1,
1577 AMDGPU_MAX_VPE_INSTANCES);
1578 }
1579
1580 if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1581 adev->gmc.num_umc++;
1582 adev->umc.node_inst_num++;
1583 }
1584
1585 if (le16_to_cpu(ip->hw_id) == GC_HWID)
1586 adev->gfx.xcc_mask |=
1587 (1U << ip->instance_number);
1588
1589 if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
1590 wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
1591 ip->revision, 0, 0);
1592
1593 for (k = 0; k < num_base_address; k++) {
1594 /*
1595 * convert the endianness of base addresses in place,
1596 * so that we don't need to convert them when accessing adev->reg_offset.
1597 */
1598 if (ihdr->base_addr_64_bit)
1599 /* Truncate the 64bit base address from ip discovery
1600 * and only store lower 32bit ip base in reg_offset[].
1601 * Bits > 32 follow an ASIC-specific format, thus just
1602 * discard them and handle them within the specific ASIC.
1603 * This way reg_offset[] and related helpers can
1604 * stay unchanged.
1605 * The base address is in dwords, thus clear the
1606 * highest 2 bits to store.
1607 */
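/* e.g. a 64-bit base of 0x0000000ac0001234 is stored as
 * lower_32_bits() & 0x3FFFFFFF = 0x00001234 in base_address[k].
 */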
1608 ip->base_address[k] =
1609 lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1610 else
1611 ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1612 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1613 }
1614
1615 for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1616 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1617 hw_id_map[hw_ip] != 0) {
1618 DRM_DEBUG("set register base offset for %s\n",
1619 hw_id_names[le16_to_cpu(ip->hw_id)]);
1620 adev->reg_offset[hw_ip][ip->instance_number] =
1621 ip->base_address;
1622 /* Instance support is somewhat inconsistent.
1623 * SDMA is a good example. Sienna cichlid has 4 total
1624 * SDMA instances, each enumerated separately (HWIDs
1625 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
1626 * but they are enumerated as multiple instances of the
1627 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
1628 * example. On most chips there are multiple instances
1629 * with the same HWID.
1630 */
1631
1632 if (ihdr->version < 3) {
1633 subrev = 0;
1634 variant = 0;
1635 } else {
1636 subrev = ip->sub_revision;
1637 variant = ip->variant;
1638 }
1639
1640 adev->ip_versions[hw_ip]
1641 [ip->instance_number] =
1642 IP_VERSION_FULL(ip->major,
1643 ip->minor,
1644 ip->revision,
1645 variant,
1646 subrev);
1647 }
1648 }
1649
1650 next_ip:
1651 if (ihdr->base_addr_64_bit)
1652 ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1653 else
1654 ip_offset += struct_size(ip, base_address, ip->num_base_address);
1655 }
1656 }
1657
1658 if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
1659 adev->ip_versions[XGMI_HWIP][0] = wafl_ver;
1660
1661 return 0;
1662 }
1663
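/* Apply harvesting: for Navi1x-era parts read the per-IP harvest bits,
 * otherwise consume the dedicated harvest table, then fold the result
 * into the VCN/JPEG/UMC masks.
 */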
1664 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1665 {
1666 uint8_t *discovery_bin = adev->discovery.bin;
1667 struct ip_discovery_header *ihdr;
1668 struct table_info *info;
1669 int vcn_harvest_count = 0;
1670 int umc_harvest_count = 0;
1671 uint16_t ihdr_ver;
1672
1673 if (amdgpu_discovery_get_table_info(adev, &info, IP_DISCOVERY))
1674 return;
1675 ihdr = (struct ip_discovery_header *)(discovery_bin +
1676 le16_to_cpu(info->offset));
1677 ihdr_ver = le16_to_cpu(ihdr->version);
1678 /*
1679 * The harvest table does not apply to Navi1x and legacy GPUs,
1680 * so read the harvest bit in each IP data structure to set the
1681 * harvest configuration.
1682 */
1683 if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1684 ihdr_ver <= 2) {
1685 if ((adev->pdev->device == 0x731E &&
1686 (adev->pdev->revision == 0xC6 ||
1687 adev->pdev->revision == 0xC7)) ||
1688 (adev->pdev->device == 0x7340 &&
1689 adev->pdev->revision == 0xC9) ||
1690 (adev->pdev->device == 0x7360 &&
1691 adev->pdev->revision == 0xC7))
1692 amdgpu_discovery_read_harvest_bit_per_ip(adev,
1693 &vcn_harvest_count);
1694 } else {
1695 amdgpu_discovery_read_from_harvest_table(adev,
1696 &vcn_harvest_count,
1697 &umc_harvest_count);
1698 }
1699
1700 amdgpu_discovery_harvest_config_quirk(adev);
1701
1702 if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1703 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1704 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1705 }
1706
1707 if (umc_harvest_count < adev->gmc.num_umc) {
1708 adev->gmc.num_umc -= umc_harvest_count;
1709 }
1710 }
1711
1712 union gc_info {
1713 struct gc_info_v1_0 v1;
1714 struct gc_info_v1_1 v1_1;
1715 struct gc_info_v1_2 v1_2;
1716 struct gc_info_v1_3 v1_3;
1717 struct gc_info_v2_0 v2;
1718 struct gc_info_v2_1 v2_1;
1719 };
1720
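/* Parse the GC info table into adev->gfx.config / adev->gfx.cu_info,
 * handling the v1.x and v2.x table layouts and their minor revisions.
 */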
1721 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1722 {
1723 uint8_t *discovery_bin = adev->discovery.bin;
1724 struct table_info *info;
1725 union gc_info *gc_info;
1726 u16 offset;
1727
1728 if (!discovery_bin) {
1729 DRM_ERROR("ip discovery uninitialized\n");
1730 return -EINVAL;
1731 }
1732
1733 if (amdgpu_discovery_get_table_info(adev, &info, GC))
1734 return -EINVAL;
1735 offset = le16_to_cpu(info->offset);
1736
1737 if (!offset)
1738 return 0;
1739
1740 gc_info = (union gc_info *)(discovery_bin + offset);
1741
1742 switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1743 case 1:
1744 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1745 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1746 le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1747 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1748 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1749 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1750 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1751 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1752 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1753 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1754 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1755 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1756 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1757 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1758 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1759 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1760 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1761 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1762 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1763 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1764 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1765 adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1766 }
1767 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1768 adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1769 adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1770 adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1771 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1772 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1773 adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1774 adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1775 adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1776 }
1777 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
1778 adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
1779 adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
1780 adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
1781 adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
1782 adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
1783 adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
1784 adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
1785 adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
1786 }
1787 break;
1788 case 2:
1789 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1790 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1791 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1792 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1793 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1794 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1795 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1796 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1797 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1798 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1799 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1800 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1801 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1802 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1803 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1804 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1805 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1806 if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1807 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1808 adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1809 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1810 adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1811 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1812 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1813 adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1814 }
1815 break;
1816 default:
1817 dev_err(adev->dev,
1818 "Unhandled GC info table %d.%d\n",
1819 le16_to_cpu(gc_info->v1.header.version_major),
1820 le16_to_cpu(gc_info->v1.header.version_minor));
1821 return -EINVAL;
1822 }
1823 return 0;
1824 }
1825
1826 union mall_info {
1827 struct mall_info_v1_0 v1;
1828 struct mall_info_v2_0 v2;
1829 };
1830
1831 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1832 {
1833 uint8_t *discovery_bin = adev->discovery.bin;
1834 struct table_info *info;
1835 union mall_info *mall_info;
1836 u32 u, mall_size_per_umc, m_s_present, half_use;
1837 u64 mall_size;
1838 u16 offset;
1839
1840 if (!discovery_bin) {
1841 DRM_ERROR("ip discovery uninitialized\n");
1842 return -EINVAL;
1843 }
1844
1845 if (amdgpu_discovery_get_table_info(adev, &info, MALL_INFO))
1846 return -EINVAL;
1847 offset = le16_to_cpu(info->offset);
1848
1849 if (!offset)
1850 return 0;
1851
1852 mall_info = (union mall_info *)(discovery_bin + offset);
1853
1854 switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1855 case 1:
1856 mall_size = 0;
1857 mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1858 m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1859 half_use = le32_to_cpu(mall_info->v1.m_half_use);
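/*
 * Each UMC channel contributes mall_size_per_umc; channels flagged in
 * m_s_present contribute twice that amount, and channels flagged in
 * m_half_use contribute half.
 */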
1860 for (u = 0; u < adev->gmc.num_umc; u++) {
1861 if (m_s_present & (1 << u))
1862 mall_size += mall_size_per_umc * 2;
1863 else if (half_use & (1 << u))
1864 mall_size += mall_size_per_umc / 2;
1865 else
1866 mall_size += mall_size_per_umc;
1867 }
1868 adev->gmc.mall_size = mall_size;
1869 adev->gmc.m_half_use = half_use;
1870 break;
1871 case 2:
1872 mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1873 adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
1874 break;
1875 default:
1876 dev_err(adev->dev,
1877 "Unhandled MALL info table %d.%d\n",
1878 le16_to_cpu(mall_info->v1.header.version_major),
1879 le16_to_cpu(mall_info->v1.header.version_minor));
1880 return -EINVAL;
1881 }
1882 return 0;
1883 }
1884
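/*
 * The VCN info table carries per-instance fuse data that is used to build
 * the codec disable mask for each VCN instance.
 */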
1885 union vcn_info {
1886 struct vcn_info_v1_0 v1;
1887 };
1888
1889 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1890 {
1891 uint8_t *discovery_bin = adev->discovery.bin;
1892 struct table_info *info;
1893 union vcn_info *vcn_info;
1894 u16 offset;
1895 int v;
1896
1897 if (!discovery_bin) {
1898 DRM_ERROR("ip discovery uninitialized\n");
1899 return -EINVAL;
1900 }
1901
1902 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1903 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1904 * but that may change in the future with new GPUs so keep this
1905 * check for defensive purposes.
1906 */
1907 if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1908 dev_err(adev->dev, "invalid vcn instances\n");
1909 return -EINVAL;
1910 }
1911
1912 if (amdgpu_discovery_get_table_info(adev, &info, VCN_INFO))
1913 return -EINVAL;
1914 offset = le16_to_cpu(info->offset);
1915
1916 if (!offset)
1917 return 0;
1918
1919 vcn_info = (union vcn_info *)(discovery_bin + offset);
1920
1921 switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1922 case 1:
1923 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1924 * so this won't overflow.
1925 */
1926 for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1927 adev->vcn.inst[v].vcn_codec_disable_mask =
1928 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1929 }
1930 break;
1931 default:
1932 dev_err(adev->dev,
1933 "Unhandled VCN info table %d.%d\n",
1934 le16_to_cpu(vcn_info->v1.header.version_major),
1935 le16_to_cpu(vcn_info->v1.header.version_minor));
1936 return -EINVAL;
1937 }
1938 return 0;
1939 }
1940
1941 union nps_info {
1942 struct nps_info_v1_0 v1;
1943 };
1944
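/*
 * Re-read the NPS info table directly from the discovery TMR near the top of
 * VRAM (RCC_CONFIG_MEMSIZE reports the VRAM size in MB). Both v1 and v2
 * binary headers are handled, and the table checksum is verified before the
 * data is used.
 */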
1945 static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
1946 union nps_info *nps_data)
1947 {
1948 uint64_t vram_size, pos, offset;
1949 struct nps_info_header *nhdr;
1950 struct binary_header bhdr;
1951 struct binary_header_v2 bhdrv2;
1952 uint16_t checksum;
1953
1954 vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
1955 pos = vram_size - DISCOVERY_TMR_OFFSET;
1956 amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);
1957
1958 switch (bhdr.version_major) {
1959 case 2:
1960 amdgpu_device_vram_access(adev, pos, &bhdrv2, sizeof(bhdrv2), false);
1961 offset = le16_to_cpu(bhdrv2.table_list[NPS_INFO].offset);
1962 checksum = le16_to_cpu(bhdrv2.table_list[NPS_INFO].checksum);
1963 break;
1964 case 1:
1965 offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
1966 checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
1967 break;
1968 default:
1969 return -EINVAL;
1970 }
1971
1972 amdgpu_device_vram_access(adev, (pos + offset), nps_data,
1973 sizeof(*nps_data), false);
1974
1975 nhdr = (struct nps_info_header *)(nps_data);
1976 if (!amdgpu_discovery_verify_checksum(adev, (uint8_t *)nps_data,
1977 le32_to_cpu(nhdr->size_bytes),
1978 checksum)) {
1979 dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
1980 return -EINVAL;
1981 }
1982
1983 return 0;
1984 }
1985
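/*
 * Return the NPS type and memory ranges from the NPS info table. When
 * @refresh is set, the table is re-read from VRAM; otherwise the cached
 * discovery binary is used. @range_cnt is in/out: it must hold the capacity
 * of @ranges on entry and is updated to the number of ranges filled in.
 */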
1986 int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
1987 uint32_t *nps_type,
1988 struct amdgpu_gmc_memrange *ranges,
1989 int *range_cnt, bool refresh)
1990 {
1991 uint8_t *discovery_bin = adev->discovery.bin;
1992 struct table_info *info;
1993 union nps_info *nps_info;
1994 union nps_info nps_data;
1995 u16 offset;
1996 int i, r;
1997
1998 if (!nps_type || !range_cnt || !ranges)
1999 return -EINVAL;
2000
2001 if (refresh) {
2002 r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
2003 if (r)
2004 return r;
2005 nps_info = &nps_data;
2006 } else {
2007 if (!discovery_bin) {
2008 dev_err(adev->dev,
2009 "fetch mem range failed, ip discovery uninitialized\n");
2010 return -EINVAL;
2011 }
2012
2013 if (amdgpu_discovery_get_table_info(adev, &info, NPS_INFO))
2014 return -EINVAL;
2015 offset = le16_to_cpu(info->offset);
2016
2017 if (!offset)
2018 return -ENOENT;
2019
2020 /* If verification fails, return as if NPS table doesn't exist */
2021 if (amdgpu_discovery_verify_npsinfo(adev, info))
2022 return -ENOENT;
2023
2024 nps_info = (union nps_info *)(discovery_bin + offset);
2025 }
2026
2027 switch (le16_to_cpu(nps_info->v1.header.version_major)) {
2028 case 1:
2029 *nps_type = nps_info->v1.nps_type;
2030 if (*range_cnt < nps_info->v1.count) {
2031 dev_dbg(adev->dev,
2032 "not enough space for nps ranges: %d < %d\n",
2033 *range_cnt, nps_info->v1.count);
2034 return -ENOSPC;
2035 }
2036 *range_cnt = nps_info->v1.count;
2037 for (i = 0; i < *range_cnt; i++) {
2038 ranges[i].base_address =
2039 nps_info->v1.instance_info[i].base_address;
2040 ranges[i].limit_address =
2041 nps_info->v1.instance_info[i].limit_address;
2042 ranges[i].nid_mask = -1;
2043 ranges[i].flags = 0;
2044 }
2045 break;
2046 default:
2047 dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
2048 le16_to_cpu(nps_info->v1.header.version_major),
2049 le16_to_cpu(nps_info->v1.header.version_minor));
2050 return -EINVAL;
2051 }
2052
2053 return 0;
2054 }
2055
2056 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
2057 {
2058 /* what IP to use for this? */
2059 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2060 case IP_VERSION(9, 0, 1):
2061 case IP_VERSION(9, 1, 0):
2062 case IP_VERSION(9, 2, 1):
2063 case IP_VERSION(9, 2, 2):
2064 case IP_VERSION(9, 3, 0):
2065 case IP_VERSION(9, 4, 0):
2066 case IP_VERSION(9, 4, 1):
2067 case IP_VERSION(9, 4, 2):
2068 case IP_VERSION(9, 4, 3):
2069 case IP_VERSION(9, 4, 4):
2070 case IP_VERSION(9, 5, 0):
2071 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
2072 break;
2073 case IP_VERSION(10, 1, 10):
2074 case IP_VERSION(10, 1, 1):
2075 case IP_VERSION(10, 1, 2):
2076 case IP_VERSION(10, 1, 3):
2077 case IP_VERSION(10, 1, 4):
2078 case IP_VERSION(10, 3, 0):
2079 case IP_VERSION(10, 3, 1):
2080 case IP_VERSION(10, 3, 2):
2081 case IP_VERSION(10, 3, 3):
2082 case IP_VERSION(10, 3, 4):
2083 case IP_VERSION(10, 3, 5):
2084 case IP_VERSION(10, 3, 6):
2085 case IP_VERSION(10, 3, 7):
2086 amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
2087 break;
2088 case IP_VERSION(11, 0, 0):
2089 case IP_VERSION(11, 0, 1):
2090 case IP_VERSION(11, 0, 2):
2091 case IP_VERSION(11, 0, 3):
2092 case IP_VERSION(11, 0, 4):
2093 case IP_VERSION(11, 5, 0):
2094 case IP_VERSION(11, 5, 1):
2095 case IP_VERSION(11, 5, 2):
2096 case IP_VERSION(11, 5, 3):
2097 case IP_VERSION(11, 5, 4):
2098 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
2099 break;
2100 case IP_VERSION(12, 0, 0):
2101 case IP_VERSION(12, 0, 1):
2102 amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
2103 break;
2104 case IP_VERSION(12, 1, 0):
2105 amdgpu_device_ip_block_add(adev, &soc_v1_0_common_ip_block);
2106 break;
2107 default:
2108 dev_err(adev->dev,
2109 "Failed to add common ip block(GC_HWIP:0x%x)\n",
2110 amdgpu_ip_version(adev, GC_HWIP, 0));
2111 return -EINVAL;
2112 }
2113 return 0;
2114 }
2115
2116 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
2117 {
2118 /* use GC or MMHUB IP version */
2119 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2120 case IP_VERSION(9, 0, 1):
2121 case IP_VERSION(9, 1, 0):
2122 case IP_VERSION(9, 2, 1):
2123 case IP_VERSION(9, 2, 2):
2124 case IP_VERSION(9, 3, 0):
2125 case IP_VERSION(9, 4, 0):
2126 case IP_VERSION(9, 4, 1):
2127 case IP_VERSION(9, 4, 2):
2128 case IP_VERSION(9, 4, 3):
2129 case IP_VERSION(9, 4, 4):
2130 case IP_VERSION(9, 5, 0):
2131 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
2132 break;
2133 case IP_VERSION(10, 1, 10):
2134 case IP_VERSION(10, 1, 1):
2135 case IP_VERSION(10, 1, 2):
2136 case IP_VERSION(10, 1, 3):
2137 case IP_VERSION(10, 1, 4):
2138 case IP_VERSION(10, 3, 0):
2139 case IP_VERSION(10, 3, 1):
2140 case IP_VERSION(10, 3, 2):
2141 case IP_VERSION(10, 3, 3):
2142 case IP_VERSION(10, 3, 4):
2143 case IP_VERSION(10, 3, 5):
2144 case IP_VERSION(10, 3, 6):
2145 case IP_VERSION(10, 3, 7):
2146 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
2147 break;
2148 case IP_VERSION(11, 0, 0):
2149 case IP_VERSION(11, 0, 1):
2150 case IP_VERSION(11, 0, 2):
2151 case IP_VERSION(11, 0, 3):
2152 case IP_VERSION(11, 0, 4):
2153 case IP_VERSION(11, 5, 0):
2154 case IP_VERSION(11, 5, 1):
2155 case IP_VERSION(11, 5, 2):
2156 case IP_VERSION(11, 5, 3):
2157 case IP_VERSION(11, 5, 4):
2158 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
2159 break;
2160 case IP_VERSION(12, 0, 0):
2161 case IP_VERSION(12, 0, 1):
2162 case IP_VERSION(12, 1, 0):
2163 amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
2164 break;
2165 default:
2166 dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
2167 amdgpu_ip_version(adev, GC_HWIP, 0));
2168 return -EINVAL;
2169 }
2170 return 0;
2171 }
2172
2173 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
2174 {
2175 switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
2176 case IP_VERSION(4, 0, 0):
2177 case IP_VERSION(4, 0, 1):
2178 case IP_VERSION(4, 1, 0):
2179 case IP_VERSION(4, 1, 1):
2180 case IP_VERSION(4, 3, 0):
2181 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
2182 break;
2183 case IP_VERSION(4, 2, 0):
2184 case IP_VERSION(4, 2, 1):
2185 case IP_VERSION(4, 4, 0):
2186 case IP_VERSION(4, 4, 2):
2187 case IP_VERSION(4, 4, 5):
2188 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
2189 break;
2190 case IP_VERSION(5, 0, 0):
2191 case IP_VERSION(5, 0, 1):
2192 case IP_VERSION(5, 0, 2):
2193 case IP_VERSION(5, 0, 3):
2194 case IP_VERSION(5, 2, 0):
2195 case IP_VERSION(5, 2, 1):
2196 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
2197 break;
2198 case IP_VERSION(6, 0, 0):
2199 case IP_VERSION(6, 0, 1):
2200 case IP_VERSION(6, 0, 2):
2201 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
2202 break;
2203 case IP_VERSION(6, 1, 0):
2204 case IP_VERSION(6, 1, 1):
2205 amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
2206 break;
2207 case IP_VERSION(7, 0, 0):
2208 case IP_VERSION(7, 1, 0):
2209 amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
2210 break;
2211 default:
2212 dev_err(adev->dev,
2213 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
2214 amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
2215 return -EINVAL;
2216 }
2217 return 0;
2218 }
2219
2220 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
2221 {
2222 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2223 case IP_VERSION(9, 0, 0):
2224 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
2225 break;
2226 case IP_VERSION(10, 0, 0):
2227 case IP_VERSION(10, 0, 1):
2228 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
2229 break;
2230 case IP_VERSION(11, 0, 0):
2231 case IP_VERSION(11, 0, 2):
2232 case IP_VERSION(11, 0, 4):
2233 case IP_VERSION(11, 0, 5):
2234 case IP_VERSION(11, 0, 9):
2235 case IP_VERSION(11, 0, 7):
2236 case IP_VERSION(11, 0, 11):
2237 case IP_VERSION(11, 0, 12):
2238 case IP_VERSION(11, 0, 13):
2239 case IP_VERSION(11, 5, 0):
2240 case IP_VERSION(11, 5, 2):
2241 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
2242 break;
2243 case IP_VERSION(11, 0, 8):
2244 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
2245 break;
2246 case IP_VERSION(11, 0, 3):
2247 case IP_VERSION(12, 0, 1):
2248 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
2249 break;
2250 case IP_VERSION(13, 0, 0):
2251 case IP_VERSION(13, 0, 1):
2252 case IP_VERSION(13, 0, 2):
2253 case IP_VERSION(13, 0, 3):
2254 case IP_VERSION(13, 0, 5):
2255 case IP_VERSION(13, 0, 6):
2256 case IP_VERSION(13, 0, 7):
2257 case IP_VERSION(13, 0, 8):
2258 case IP_VERSION(13, 0, 10):
2259 case IP_VERSION(13, 0, 11):
2260 case IP_VERSION(13, 0, 12):
2261 case IP_VERSION(13, 0, 14):
2262 case IP_VERSION(13, 0, 15):
2263 case IP_VERSION(14, 0, 0):
2264 case IP_VERSION(14, 0, 1):
2265 case IP_VERSION(14, 0, 4):
2266 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
2267 break;
2268 case IP_VERSION(13, 0, 4):
2269 amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
2270 break;
2271 case IP_VERSION(14, 0, 2):
2272 case IP_VERSION(14, 0, 3):
2273 case IP_VERSION(14, 0, 5):
2274 amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
2275 break;
2276 case IP_VERSION(15, 0, 0):
2277 amdgpu_device_ip_block_add(adev, &psp_v15_0_ip_block);
2278 break;
2279 case IP_VERSION(15, 0, 8):
2280 amdgpu_device_ip_block_add(adev, &psp_v15_0_8_ip_block);
2281 break;
2282 default:
2283 dev_err(adev->dev,
2284 "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
2285 amdgpu_ip_version(adev, MP0_HWIP, 0));
2286 return -EINVAL;
2287 }
2288 return 0;
2289 }
2290
2291 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
2292 {
2293 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2294 case IP_VERSION(9, 0, 0):
2295 case IP_VERSION(10, 0, 0):
2296 case IP_VERSION(10, 0, 1):
2297 case IP_VERSION(11, 0, 2):
2298 if (adev->asic_type == CHIP_ARCTURUS)
2299 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2300 else
2301 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2302 break;
2303 case IP_VERSION(11, 0, 0):
2304 case IP_VERSION(11, 0, 5):
2305 case IP_VERSION(11, 0, 9):
2306 case IP_VERSION(11, 0, 7):
2307 case IP_VERSION(11, 0, 11):
2308 case IP_VERSION(11, 0, 12):
2309 case IP_VERSION(11, 0, 13):
2310 case IP_VERSION(11, 5, 0):
2311 case IP_VERSION(11, 5, 2):
2312 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2313 break;
2314 case IP_VERSION(11, 0, 8):
2315 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
2316 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2317 break;
2318 case IP_VERSION(12, 0, 0):
2319 case IP_VERSION(12, 0, 1):
2320 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
2321 break;
2322 case IP_VERSION(13, 0, 0):
2323 case IP_VERSION(13, 0, 1):
2324 case IP_VERSION(13, 0, 2):
2325 case IP_VERSION(13, 0, 3):
2326 case IP_VERSION(13, 0, 4):
2327 case IP_VERSION(13, 0, 5):
2328 case IP_VERSION(13, 0, 6):
2329 case IP_VERSION(13, 0, 7):
2330 case IP_VERSION(13, 0, 8):
2331 case IP_VERSION(13, 0, 10):
2332 case IP_VERSION(13, 0, 11):
2333 case IP_VERSION(13, 0, 14):
2334 case IP_VERSION(13, 0, 12):
2335 amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
2336 break;
2337 case IP_VERSION(14, 0, 0):
2338 case IP_VERSION(14, 0, 1):
2339 case IP_VERSION(14, 0, 2):
2340 case IP_VERSION(14, 0, 3):
2341 case IP_VERSION(14, 0, 4):
2342 case IP_VERSION(14, 0, 5):
2343 amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
2344 break;
2345 case IP_VERSION(15, 0, 0):
2346 case IP_VERSION(15, 0, 8):
2347 amdgpu_device_ip_block_add(adev, &smu_v15_0_ip_block);
2348 break;
2349 default:
2350 dev_err(adev->dev,
2351 "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
2352 amdgpu_ip_version(adev, MP1_HWIP, 0));
2353 return -EINVAL;
2354 }
2355 return 0;
2356 }
2357
2358 #if defined(CONFIG_DRM_AMD_DC)
2359 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
2360 {
2361 amdgpu_device_set_sriov_virtual_display(adev);
2362 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2363 }
2364 #endif
2365
2366 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
2367 {
2368 if (adev->enable_virtual_display) {
2369 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2370 return 0;
2371 }
2372
2373 if (!amdgpu_device_has_dc_support(adev))
2374 return 0;
2375
2376 #if defined(CONFIG_DRM_AMD_DC)
2377 if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2378 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2379 case IP_VERSION(1, 0, 0):
2380 case IP_VERSION(1, 0, 1):
2381 case IP_VERSION(2, 0, 2):
2382 case IP_VERSION(2, 0, 0):
2383 case IP_VERSION(2, 0, 3):
2384 case IP_VERSION(2, 1, 0):
2385 case IP_VERSION(3, 0, 0):
2386 case IP_VERSION(3, 0, 2):
2387 case IP_VERSION(3, 0, 3):
2388 case IP_VERSION(3, 0, 1):
2389 case IP_VERSION(3, 1, 2):
2390 case IP_VERSION(3, 1, 3):
2391 case IP_VERSION(3, 1, 4):
2392 case IP_VERSION(3, 1, 5):
2393 case IP_VERSION(3, 1, 6):
2394 case IP_VERSION(3, 2, 0):
2395 case IP_VERSION(3, 2, 1):
2396 case IP_VERSION(3, 5, 0):
2397 case IP_VERSION(3, 5, 1):
2398 case IP_VERSION(3, 6, 0):
2399 case IP_VERSION(4, 1, 0):
2400 case IP_VERSION(4, 2, 0):
2401 /* TODO: Fix IP version. DC code expects version 4.0.1 */
2402 if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
2403 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);
2404
2405 if (amdgpu_sriov_vf(adev))
2406 amdgpu_discovery_set_sriov_display(adev);
2407 else
2408 amdgpu_device_ip_block_add(adev, &dm_ip_block);
2409 break;
2410 default:
2411 dev_err(adev->dev,
2412 "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
2413 amdgpu_ip_version(adev, DCE_HWIP, 0));
2414 return -EINVAL;
2415 }
2416 } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2417 switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2418 case IP_VERSION(12, 0, 0):
2419 case IP_VERSION(12, 0, 1):
2420 case IP_VERSION(12, 1, 0):
2421 if (amdgpu_sriov_vf(adev))
2422 amdgpu_discovery_set_sriov_display(adev);
2423 else
2424 amdgpu_device_ip_block_add(adev, &dm_ip_block);
2425 break;
2426 default:
2427 dev_err(adev->dev,
2428 "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
2429 amdgpu_ip_version(adev, DCI_HWIP, 0));
2430 return -EINVAL;
2431 }
2432 }
2433 #endif
2434 return 0;
2435 }
2436
2437 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
2438 {
2439 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2440 case IP_VERSION(9, 0, 1):
2441 case IP_VERSION(9, 1, 0):
2442 case IP_VERSION(9, 2, 1):
2443 case IP_VERSION(9, 2, 2):
2444 case IP_VERSION(9, 3, 0):
2445 case IP_VERSION(9, 4, 0):
2446 case IP_VERSION(9, 4, 1):
2447 case IP_VERSION(9, 4, 2):
2448 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
2449 break;
2450 case IP_VERSION(9, 4, 3):
2451 case IP_VERSION(9, 4, 4):
2452 case IP_VERSION(9, 5, 0):
2453 amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
2454 break;
2455 case IP_VERSION(10, 1, 10):
2456 case IP_VERSION(10, 1, 2):
2457 case IP_VERSION(10, 1, 1):
2458 case IP_VERSION(10, 1, 3):
2459 case IP_VERSION(10, 1, 4):
2460 case IP_VERSION(10, 3, 0):
2461 case IP_VERSION(10, 3, 2):
2462 case IP_VERSION(10, 3, 1):
2463 case IP_VERSION(10, 3, 4):
2464 case IP_VERSION(10, 3, 5):
2465 case IP_VERSION(10, 3, 6):
2466 case IP_VERSION(10, 3, 3):
2467 case IP_VERSION(10, 3, 7):
2468 amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
2469 break;
2470 case IP_VERSION(11, 0, 0):
2471 case IP_VERSION(11, 0, 1):
2472 case IP_VERSION(11, 0, 2):
2473 case IP_VERSION(11, 0, 3):
2474 case IP_VERSION(11, 0, 4):
2475 case IP_VERSION(11, 5, 0):
2476 case IP_VERSION(11, 5, 1):
2477 case IP_VERSION(11, 5, 2):
2478 case IP_VERSION(11, 5, 3):
2479 case IP_VERSION(11, 5, 4):
2480 amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
2481 break;
2482 case IP_VERSION(12, 0, 0):
2483 case IP_VERSION(12, 0, 1):
2484 amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
2485 break;
2486 case IP_VERSION(12, 1, 0):
2487 amdgpu_device_ip_block_add(adev, &gfx_v12_1_ip_block);
2488 break;
2489 default:
2490 dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
2491 amdgpu_ip_version(adev, GC_HWIP, 0));
2492 return -EINVAL;
2493 }
2494 return 0;
2495 }
2496
2497 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
2498 {
2499 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
2500 case IP_VERSION(4, 0, 0):
2501 case IP_VERSION(4, 0, 1):
2502 case IP_VERSION(4, 1, 0):
2503 case IP_VERSION(4, 1, 1):
2504 case IP_VERSION(4, 1, 2):
2505 case IP_VERSION(4, 2, 0):
2506 case IP_VERSION(4, 2, 2):
2507 case IP_VERSION(4, 4, 0):
2508 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
2509 break;
2510 case IP_VERSION(4, 4, 2):
2511 case IP_VERSION(4, 4, 5):
2512 case IP_VERSION(4, 4, 4):
2513 amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
2514 break;
2515 case IP_VERSION(5, 0, 0):
2516 case IP_VERSION(5, 0, 1):
2517 case IP_VERSION(5, 0, 2):
2518 case IP_VERSION(5, 0, 5):
2519 amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2520 break;
2521 case IP_VERSION(5, 2, 0):
2522 case IP_VERSION(5, 2, 2):
2523 case IP_VERSION(5, 2, 4):
2524 case IP_VERSION(5, 2, 5):
2525 case IP_VERSION(5, 2, 6):
2526 case IP_VERSION(5, 2, 3):
2527 case IP_VERSION(5, 2, 1):
2528 case IP_VERSION(5, 2, 7):
2529 amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2530 break;
2531 case IP_VERSION(6, 0, 0):
2532 case IP_VERSION(6, 0, 1):
2533 case IP_VERSION(6, 0, 2):
2534 case IP_VERSION(6, 0, 3):
2535 case IP_VERSION(6, 1, 0):
2536 case IP_VERSION(6, 1, 1):
2537 case IP_VERSION(6, 1, 2):
2538 case IP_VERSION(6, 1, 3):
2539 case IP_VERSION(6, 1, 4):
2540 amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2541 break;
2542 case IP_VERSION(7, 0, 0):
2543 case IP_VERSION(7, 0, 1):
2544 amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
2545 break;
2546 case IP_VERSION(7, 1, 0):
2547 amdgpu_device_ip_block_add(adev, &sdma_v7_1_ip_block);
2548 break;
2549 default:
2550 dev_err(adev->dev,
2551 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2552 amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2553 return -EINVAL;
2554 }
2555
2556 return 0;
2557 }
2558
2559 static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev)
2560 {
2561 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2562 case IP_VERSION(13, 0, 6):
2563 case IP_VERSION(13, 0, 12):
2564 case IP_VERSION(13, 0, 14):
2565 amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block);
2566 break;
2567 default:
2568 break;
2569 }
2570 return 0;
2571 }
2572
2573 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2574 {
2575 if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2576 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2577 case IP_VERSION(7, 0, 0):
2578 case IP_VERSION(7, 2, 0):
2579 /* UVD is not supported on vega20 SR-IOV */
2580 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2581 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2582 break;
2583 default:
2584 dev_err(adev->dev,
2585 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2586 amdgpu_ip_version(adev, UVD_HWIP, 0));
2587 return -EINVAL;
2588 }
2589 switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2590 case IP_VERSION(4, 0, 0):
2591 case IP_VERSION(4, 1, 0):
2592 /* VCE is not supported on vega20 SR-IOV */
2593 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2594 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2595 break;
2596 default:
2597 dev_err(adev->dev,
2598 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2599 amdgpu_ip_version(adev, VCE_HWIP, 0));
2600 return -EINVAL;
2601 }
2602 } else {
2603 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2604 case IP_VERSION(1, 0, 0):
2605 case IP_VERSION(1, 0, 1):
2606 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2607 break;
2608 case IP_VERSION(2, 0, 0):
2609 case IP_VERSION(2, 0, 2):
2610 case IP_VERSION(2, 2, 0):
2611 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2612 if (!amdgpu_sriov_vf(adev))
2613 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2614 break;
2615 case IP_VERSION(2, 0, 3):
2616 break;
2617 case IP_VERSION(2, 5, 0):
2618 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2619 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2620 break;
2621 case IP_VERSION(2, 6, 0):
2622 amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2623 amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2624 break;
2625 case IP_VERSION(3, 0, 0):
2626 case IP_VERSION(3, 0, 16):
2627 case IP_VERSION(3, 1, 1):
2628 case IP_VERSION(3, 1, 2):
2629 case IP_VERSION(3, 0, 2):
2630 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2631 if (!amdgpu_sriov_vf(adev))
2632 amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2633 break;
2634 case IP_VERSION(3, 0, 33):
2635 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2636 break;
2637 case IP_VERSION(4, 0, 0):
2638 case IP_VERSION(4, 0, 2):
2639 case IP_VERSION(4, 0, 4):
2640 amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2641 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2642 break;
2643 case IP_VERSION(4, 0, 3):
2644 amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2645 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2646 break;
2647 case IP_VERSION(4, 0, 5):
2648 case IP_VERSION(4, 0, 6):
2649 amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2650 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2651 break;
2652 case IP_VERSION(5, 0, 0):
2653 amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2654 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
2655 break;
2656 case IP_VERSION(5, 3, 0):
2657 amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2658 amdgpu_device_ip_block_add(adev, &jpeg_v5_3_0_ip_block);
2659 break;
2660 case IP_VERSION(5, 0, 1):
2661 amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
2662 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
2663 break;
2664 case IP_VERSION(5, 0, 2):
2665 amdgpu_device_ip_block_add(adev, &vcn_v5_0_2_ip_block);
2666 amdgpu_device_ip_block_add(adev, &jpeg_v5_0_2_ip_block);
2667 break;
2668 default:
2669 dev_err(adev->dev,
2670 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2671 amdgpu_ip_version(adev, UVD_HWIP, 0));
2672 return -EINVAL;
2673 }
2674 }
2675 return 0;
2676 }
2677
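/*
 * MES (micro engine scheduler) is added on GFX11 and newer; the MES KIQ is
 * enabled alongside it, and unified MES is additionally selected on GFX12
 * parts when amdgpu_uni_mes is set.
 */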
2678 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2679 {
2680 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2681 case IP_VERSION(11, 0, 0):
2682 case IP_VERSION(11, 0, 1):
2683 case IP_VERSION(11, 0, 2):
2684 case IP_VERSION(11, 0, 3):
2685 case IP_VERSION(11, 0, 4):
2686 case IP_VERSION(11, 5, 0):
2687 case IP_VERSION(11, 5, 1):
2688 case IP_VERSION(11, 5, 2):
2689 case IP_VERSION(11, 5, 3):
2690 case IP_VERSION(11, 5, 4):
2691 amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2692 adev->enable_mes = true;
2693 adev->enable_mes_kiq = true;
2694 break;
2695 case IP_VERSION(12, 0, 0):
2696 case IP_VERSION(12, 0, 1):
2697 amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
2698 adev->enable_mes = true;
2699 adev->enable_mes_kiq = true;
2700 if (amdgpu_uni_mes)
2701 adev->enable_uni_mes = true;
2702 break;
2703 case IP_VERSION(12, 1, 0):
2704 amdgpu_device_ip_block_add(adev, &mes_v12_1_ip_block);
2705 adev->enable_mes = true;
2706 adev->enable_mes_kiq = true;
2707 if (amdgpu_uni_mes)
2708 adev->enable_uni_mes = true;
2709 break;
2710 default:
2711 break;
2712 }
2713 return 0;
2714 }
2715
2716 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2717 {
2718 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2719 case IP_VERSION(9, 4, 3):
2720 case IP_VERSION(9, 4, 4):
2721 case IP_VERSION(9, 5, 0):
2722 aqua_vanjaram_init_soc_config(adev);
2723 break;
2724 case IP_VERSION(12, 1, 0):
2725 soc_v1_0_init_soc_config(adev);
2726 break;
2727 default:
2728 break;
2729 }
2730 }
2731
2732 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2733 {
2734 switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2735 case IP_VERSION(6, 1, 0):
2736 case IP_VERSION(6, 1, 1):
2737 case IP_VERSION(6, 1, 3):
2738 amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2739 break;
2740 default:
2741 break;
2742 }
2743
2744 return 0;
2745 }
2746
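/*
 * UMSCH-MM is added on VCN 4.0.5/4.0.6 when bit 0 of the amdgpu_umsch_mm
 * option is set.
 */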
2747 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2748 {
2749 switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2750 case IP_VERSION(4, 0, 5):
2751 case IP_VERSION(4, 0, 6):
2752 if (amdgpu_umsch_mm & 0x1) {
2753 amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2754 adev->enable_umsch_mm = true;
2755 }
2756 break;
2757 default:
2758 break;
2759 }
2760
2761 return 0;
2762 }
2763
2764 static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
2765 {
2766 #if defined(CONFIG_DRM_AMD_ISP)
2767 switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
2768 case IP_VERSION(4, 1, 0):
2769 amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
2770 break;
2771 case IP_VERSION(4, 1, 1):
2772 amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
2773 break;
2774 default:
2775 break;
2776 }
2777 #endif
2778
2779 return 0;
2780 }
2781
2782 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2783 {
2784 int r;
2785
2786 switch (adev->asic_type) {
2787 case CHIP_VEGA10:
2788 /* This is not fatal. We only need the discovery
2789 * binary for sysfs. We don't need it for a
2790 * functional system.
2791 */
2792 amdgpu_discovery_init(adev);
2793 vega10_reg_base_init(adev);
2794 adev->sdma.num_instances = 2;
2795 adev->sdma.sdma_mask = 3;
2796 adev->gmc.num_umc = 4;
2797 adev->gfx.xcc_mask = 1;
2798 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2799 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2800 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2801 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2802 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2803 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2804 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2805 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2806 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2807 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2808 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2809 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2810 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2811 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2812 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2813 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2814 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2815 break;
2816 case CHIP_VEGA12:
2817 /* This is not fatal. We only need the discovery
2818 * binary for sysfs. We don't need it for a
2819 * functional system.
2820 */
2821 amdgpu_discovery_init(adev);
2822 vega10_reg_base_init(adev);
2823 adev->sdma.num_instances = 2;
2824 adev->sdma.sdma_mask = 3;
2825 adev->gmc.num_umc = 4;
2826 adev->gfx.xcc_mask = 1;
2827 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2828 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2829 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2830 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2831 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2832 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2833 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2834 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2835 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2836 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2837 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2838 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2839 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2840 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2841 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2842 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2843 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2844 break;
2845 case CHIP_RAVEN:
2846 /* This is not fatal. We only need the discovery
2847 * binary for sysfs. We don't need it for a
2848 * functional system.
2849 */
2850 amdgpu_discovery_init(adev);
2851 vega10_reg_base_init(adev);
2852 adev->sdma.num_instances = 1;
2853 adev->sdma.sdma_mask = 1;
2854 adev->vcn.num_vcn_inst = 1;
2855 adev->gmc.num_umc = 2;
2856 adev->gfx.xcc_mask = 1;
2857 if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2858 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2859 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2860 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2861 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2862 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2863 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2864 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2865 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2866 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2867 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2868 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2869 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2870 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2871 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2872 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2873 adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
2874 } else {
2875 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2876 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2877 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2878 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2879 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2880 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2881 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2882 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2883 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2884 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2885 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2886 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2887 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2888 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2889 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2890 adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
2891 }
2892 break;
2893 case CHIP_VEGA20:
2894 /* This is not fatal. We only need the discovery
2895 * binary for sysfs. We don't need it for a
2896 * functional system.
2897 */
2898 amdgpu_discovery_init(adev);
2899 vega20_reg_base_init(adev);
2900 adev->sdma.num_instances = 2;
2901 adev->sdma.sdma_mask = 3;
2902 adev->gmc.num_umc = 8;
2903 adev->gfx.xcc_mask = 1;
2904 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2905 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2906 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2907 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2908 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2909 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2910 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2911 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2912 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2913 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2914 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2915 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2916 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2917 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2918 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2919 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2920 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2921 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2922 break;
2923 case CHIP_ARCTURUS:
2924 /* This is not fatal. We only need the discovery
2925 * binary for sysfs. We don't need it for a
2926 * functional system.
2927 */
2928 amdgpu_discovery_init(adev);
2929 arct_reg_base_init(adev);
2930 adev->sdma.num_instances = 8;
2931 adev->sdma.sdma_mask = 0xff;
2932 adev->vcn.num_vcn_inst = 2;
2933 adev->gmc.num_umc = 8;
2934 adev->gfx.xcc_mask = 1;
2935 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2936 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2937 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2938 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2939 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2940 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2941 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2942 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2943 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2944 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2945 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2946 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2947 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2948 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2949 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2950 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2951 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2952 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2953 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2954 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2955 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2956 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2957 break;
2958 case CHIP_ALDEBARAN:
2959 /* This is not fatal. We only need the discovery
2960 * binary for sysfs. We don't need it for a
2961 * functional system.
2962 */
2963 amdgpu_discovery_init(adev);
2964 aldebaran_reg_base_init(adev);
2965 adev->sdma.num_instances = 5;
2966 adev->sdma.sdma_mask = 0x1f;
2967 adev->vcn.num_vcn_inst = 2;
2968 adev->gmc.num_umc = 4;
2969 adev->gfx.xcc_mask = 1;
2970 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2971 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2972 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2973 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2974 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2975 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2976 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2977 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2978 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2979 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2980 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2981 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2982 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2983 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2984 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2985 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2986 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2987 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2988 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2989 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2990 break;
2991 case CHIP_CYAN_SKILLFISH:
2992 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
2993 r = amdgpu_discovery_reg_base_init(adev);
2994 if (r)
2995 return -EINVAL;
2996
2997 amdgpu_discovery_harvest_ip(adev);
2998 amdgpu_discovery_get_gfx_info(adev);
2999 amdgpu_discovery_get_mall_info(adev);
3000 amdgpu_discovery_get_vcn_info(adev);
3001 } else {
3002 cyan_skillfish_reg_base_init(adev);
3003 adev->sdma.num_instances = 2;
3004 adev->sdma.sdma_mask = 3;
3005 adev->gfx.xcc_mask = 1;
3006 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
3007 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
3008 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
3009 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
3010 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
3011 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
3012 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
3013 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
3014 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
3015 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
3016 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
3017 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
3018 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
3019 adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
3020 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
3021 }
3022 break;
3023 default:
3024 r = amdgpu_discovery_reg_base_init(adev);
3025 if (r) {
3026 drm_err(&adev->ddev, "discovery failed: %d\n", r);
3027 return r;
3028 }
3029
3030 amdgpu_discovery_harvest_ip(adev);
3031 amdgpu_discovery_get_gfx_info(adev);
3032 amdgpu_discovery_get_mall_info(adev);
3033 amdgpu_discovery_get_vcn_info(adev);
3034 break;
3035 }
3036
3037 amdgpu_discovery_init_soc_config(adev);
3038 amdgpu_discovery_sysfs_init(adev);
3039
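/* Map the GC IP version to the GPU family used by the rest of the driver. */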
3040 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
3041 case IP_VERSION(9, 0, 1):
3042 case IP_VERSION(9, 2, 1):
3043 case IP_VERSION(9, 4, 0):
3044 case IP_VERSION(9, 4, 1):
3045 case IP_VERSION(9, 4, 2):
3046 case IP_VERSION(9, 4, 3):
3047 case IP_VERSION(9, 4, 4):
3048 case IP_VERSION(9, 5, 0):
3049 adev->family = AMDGPU_FAMILY_AI;
3050 break;
3051 case IP_VERSION(9, 1, 0):
3052 case IP_VERSION(9, 2, 2):
3053 case IP_VERSION(9, 3, 0):
3054 adev->family = AMDGPU_FAMILY_RV;
3055 break;
3056 case IP_VERSION(10, 1, 10):
3057 case IP_VERSION(10, 1, 1):
3058 case IP_VERSION(10, 1, 2):
3059 case IP_VERSION(10, 1, 3):
3060 case IP_VERSION(10, 1, 4):
3061 case IP_VERSION(10, 3, 0):
3062 case IP_VERSION(10, 3, 2):
3063 case IP_VERSION(10, 3, 4):
3064 case IP_VERSION(10, 3, 5):
3065 adev->family = AMDGPU_FAMILY_NV;
3066 break;
3067 case IP_VERSION(10, 3, 1):
3068 adev->family = AMDGPU_FAMILY_VGH;
3069 adev->apu_flags |= AMD_APU_IS_VANGOGH;
3070 break;
3071 case IP_VERSION(10, 3, 3):
3072 adev->family = AMDGPU_FAMILY_YC;
3073 break;
3074 case IP_VERSION(10, 3, 6):
3075 adev->family = AMDGPU_FAMILY_GC_10_3_6;
3076 break;
3077 case IP_VERSION(10, 3, 7):
3078 adev->family = AMDGPU_FAMILY_GC_10_3_7;
3079 break;
3080 case IP_VERSION(11, 0, 0):
3081 case IP_VERSION(11, 0, 2):
3082 case IP_VERSION(11, 0, 3):
3083 adev->family = AMDGPU_FAMILY_GC_11_0_0;
3084 break;
3085 case IP_VERSION(11, 0, 1):
3086 case IP_VERSION(11, 0, 4):
3087 adev->family = AMDGPU_FAMILY_GC_11_0_1;
3088 break;
3089 case IP_VERSION(11, 5, 0):
3090 case IP_VERSION(11, 5, 1):
3091 case IP_VERSION(11, 5, 2):
3092 case IP_VERSION(11, 5, 3):
3093 adev->family = AMDGPU_FAMILY_GC_11_5_0;
3094 break;
3095 case IP_VERSION(11, 5, 4):
3096 adev->family = AMDGPU_FAMILY_GC_11_5_4;
3097 break;
3098 case IP_VERSION(12, 0, 0):
3099 case IP_VERSION(12, 0, 1):
3100 case IP_VERSION(12, 1, 0):
3101 adev->family = AMDGPU_FAMILY_GC_12_0_0;
3102 break;
3103 default:
3104 return -EINVAL;
3105 }
3106
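/* Flag APUs based on their GC IP version. */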
3107 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
3108 case IP_VERSION(9, 1, 0):
3109 case IP_VERSION(9, 2, 2):
3110 case IP_VERSION(9, 3, 0):
3111 case IP_VERSION(10, 1, 3):
3112 case IP_VERSION(10, 1, 4):
3113 case IP_VERSION(10, 3, 1):
3114 case IP_VERSION(10, 3, 3):
3115 case IP_VERSION(10, 3, 6):
3116 case IP_VERSION(10, 3, 7):
3117 case IP_VERSION(11, 0, 1):
3118 case IP_VERSION(11, 0, 4):
3119 case IP_VERSION(11, 5, 0):
3120 case IP_VERSION(11, 5, 1):
3121 case IP_VERSION(11, 5, 2):
3122 case IP_VERSION(11, 5, 3):
3123 case IP_VERSION(11, 5, 4):
3124 adev->flags |= AMD_IS_APU;
3125 break;
3126 default:
3127 break;
3128 }
3129
3130 /* set NBIO version */
3131 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3132 case IP_VERSION(6, 1, 0):
3133 case IP_VERSION(6, 2, 0):
3134 adev->nbio.funcs = &nbio_v6_1_funcs;
3135 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
3136 break;
3137 case IP_VERSION(7, 0, 0):
3138 case IP_VERSION(7, 0, 1):
3139 case IP_VERSION(2, 5, 0):
3140 adev->nbio.funcs = &nbio_v7_0_funcs;
3141 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
3142 break;
3143 case IP_VERSION(7, 4, 0):
3144 case IP_VERSION(7, 4, 1):
3145 case IP_VERSION(7, 4, 4):
3146 adev->nbio.funcs = &nbio_v7_4_funcs;
3147 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
3148 break;
3149 case IP_VERSION(7, 9, 0):
3150 case IP_VERSION(7, 9, 1):
3151 adev->nbio.funcs = &nbio_v7_9_funcs;
3152 adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
3153 break;
3154 case IP_VERSION(7, 11, 0):
3155 case IP_VERSION(7, 11, 1):
3156 case IP_VERSION(7, 11, 2):
3157 case IP_VERSION(7, 11, 3):
3158 adev->nbio.funcs = &nbio_v7_11_funcs;
3159 adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
3160 break;
3161 case IP_VERSION(7, 2, 0):
3162 case IP_VERSION(7, 2, 1):
3163 case IP_VERSION(7, 3, 0):
3164 case IP_VERSION(7, 5, 0):
3165 case IP_VERSION(7, 5, 1):
3166 adev->nbio.funcs = &nbio_v7_2_funcs;
3167 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
3168 break;
3169 case IP_VERSION(2, 1, 1):
3170 case IP_VERSION(2, 3, 0):
3171 case IP_VERSION(2, 3, 1):
3172 case IP_VERSION(2, 3, 2):
3173 case IP_VERSION(3, 3, 0):
3174 case IP_VERSION(3, 3, 1):
3175 case IP_VERSION(3, 3, 2):
3176 case IP_VERSION(3, 3, 3):
3177 adev->nbio.funcs = &nbio_v2_3_funcs;
3178 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
3179 break;
3180 case IP_VERSION(4, 3, 0):
3181 case IP_VERSION(4, 3, 1):
3182 if (amdgpu_sriov_vf(adev))
3183 adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
3184 else
3185 adev->nbio.funcs = &nbio_v4_3_funcs;
3186 adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
3187 break;
3188 case IP_VERSION(7, 7, 0):
3189 case IP_VERSION(7, 7, 1):
3190 adev->nbio.funcs = &nbio_v7_7_funcs;
3191 adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
3192 break;
3193 case IP_VERSION(6, 3, 1):
3194 case IP_VERSION(7, 11, 4):
3195 adev->nbio.funcs = &nbif_v6_3_1_funcs;
3196 adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
3197 break;
3198 default:
3199 break;
3200 }
3201
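/* set HDP version */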
3202 switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
3203 case IP_VERSION(4, 0, 0):
3204 case IP_VERSION(4, 0, 1):
3205 case IP_VERSION(4, 1, 0):
3206 case IP_VERSION(4, 1, 1):
3207 case IP_VERSION(4, 1, 2):
3208 case IP_VERSION(4, 2, 0):
3209 case IP_VERSION(4, 2, 1):
3210 case IP_VERSION(4, 4, 0):
3211 case IP_VERSION(4, 4, 2):
3212 case IP_VERSION(4, 4, 5):
3213 adev->hdp.funcs = &hdp_v4_0_funcs;
3214 break;
3215 case IP_VERSION(5, 0, 0):
3216 case IP_VERSION(5, 0, 1):
3217 case IP_VERSION(5, 0, 2):
3218 case IP_VERSION(5, 0, 3):
3219 case IP_VERSION(5, 0, 4):
3220 case IP_VERSION(5, 2, 0):
3221 adev->hdp.funcs = &hdp_v5_0_funcs;
3222 break;
3223 case IP_VERSION(5, 2, 1):
3224 adev->hdp.funcs = &hdp_v5_2_funcs;
3225 break;
3226 case IP_VERSION(6, 0, 0):
3227 case IP_VERSION(6, 0, 1):
3228 case IP_VERSION(6, 1, 0):
3229 case IP_VERSION(6, 1, 1):
3230 adev->hdp.funcs = &hdp_v6_0_funcs;
3231 break;
3232 case IP_VERSION(7, 0, 0):
3233 adev->hdp.funcs = &hdp_v7_0_funcs;
3234 break;
3235 default:
3236 break;
3237 }
3238
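/* set DF version */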
3239 switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
3240 case IP_VERSION(3, 6, 0):
3241 case IP_VERSION(3, 6, 1):
3242 case IP_VERSION(3, 6, 2):
3243 adev->df.funcs = &df_v3_6_funcs;
3244 break;
3245 case IP_VERSION(2, 1, 0):
3246 case IP_VERSION(2, 1, 1):
3247 case IP_VERSION(2, 5, 0):
3248 case IP_VERSION(3, 5, 1):
3249 case IP_VERSION(3, 5, 2):
3250 adev->df.funcs = &df_v1_7_funcs;
3251 break;
3252 case IP_VERSION(4, 3, 0):
3253 adev->df.funcs = &df_v4_3_funcs;
3254 break;
3255 case IP_VERSION(4, 6, 2):
3256 adev->df.funcs = &df_v4_6_2_funcs;
3257 break;
3258 case IP_VERSION(4, 15, 0):
3259 case IP_VERSION(4, 15, 1):
3260 adev->df.funcs = &df_v4_15_funcs;
3261 break;
3262 default:
3263 break;
3264 }
3265
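/* set SMUIO version */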
3266 switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
3267 case IP_VERSION(9, 0, 0):
3268 case IP_VERSION(9, 0, 1):
3269 case IP_VERSION(10, 0, 0):
3270 case IP_VERSION(10, 0, 1):
3271 case IP_VERSION(10, 0, 2):
3272 adev->smuio.funcs = &smuio_v9_0_funcs;
3273 break;
3274 case IP_VERSION(11, 0, 0):
3275 case IP_VERSION(11, 0, 2):
3276 case IP_VERSION(11, 0, 3):
3277 case IP_VERSION(11, 0, 4):
3278 case IP_VERSION(11, 0, 7):
3279 case IP_VERSION(11, 0, 8):
3280 adev->smuio.funcs = &smuio_v11_0_funcs;
3281 break;
3282 case IP_VERSION(11, 0, 6):
3283 case IP_VERSION(11, 0, 10):
3284 case IP_VERSION(11, 0, 11):
3285 case IP_VERSION(11, 5, 0):
3286 case IP_VERSION(11, 5, 2):
3287 case IP_VERSION(13, 0, 1):
3288 case IP_VERSION(13, 0, 9):
3289 case IP_VERSION(13, 0, 10):
3290 adev->smuio.funcs = &smuio_v11_0_6_funcs;
3291 break;
3292 case IP_VERSION(13, 0, 2):
3293 adev->smuio.funcs = &smuio_v13_0_funcs;
3294 break;
3295 case IP_VERSION(13, 0, 3):
3296 case IP_VERSION(13, 0, 11):
3297 adev->smuio.funcs = &smuio_v13_0_3_funcs;
3298 if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
3299 adev->flags |= AMD_IS_APU;
3300 }
3301 break;
3302 case IP_VERSION(13, 0, 6):
3303 case IP_VERSION(13, 0, 8):
3304 case IP_VERSION(14, 0, 0):
3305 case IP_VERSION(14, 0, 1):
3306 adev->smuio.funcs = &smuio_v13_0_6_funcs;
3307 break;
3308 case IP_VERSION(14, 0, 2):
3309 adev->smuio.funcs = &smuio_v14_0_2_funcs;
3310 break;
3311 case IP_VERSION(15, 0, 0):
3312 adev->smuio.funcs = &smuio_v15_0_0_funcs;
3313 break;
3314 case IP_VERSION(15, 0, 8):
3315 adev->smuio.funcs = &smuio_v15_0_8_funcs;
3316 break;
3317 default:
3318 break;
3319 }
3320
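/* set LSDMA version */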
3321 switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
3322 case IP_VERSION(6, 0, 0):
3323 case IP_VERSION(6, 0, 1):
3324 case IP_VERSION(6, 0, 2):
3325 case IP_VERSION(6, 0, 3):
3326 adev->lsdma.funcs = &lsdma_v6_0_funcs;
3327 break;
3328 case IP_VERSION(7, 0, 0):
3329 case IP_VERSION(7, 0, 1):
3330 adev->lsdma.funcs = &lsdma_v7_0_funcs;
3331 break;
3332 case IP_VERSION(7, 1, 0):
3333 adev->lsdma.funcs = &lsdma_v7_1_funcs;
3334 break;
3335 default:
3336 break;
3337 }
3338
3339 r = amdgpu_discovery_set_common_ip_blocks(adev);
3340 if (r)
3341 return r;
3342
3343 r = amdgpu_discovery_set_gmc_ip_blocks(adev);
3344 if (r)
3345 return r;
3346
3347 /* For SR-IOV, PSP needs to be initialized before IH */
3348 if (amdgpu_sriov_vf(adev)) {
3349 r = amdgpu_discovery_set_psp_ip_blocks(adev);
3350 if (r)
3351 return r;
3352 r = amdgpu_discovery_set_ih_ip_blocks(adev);
3353 if (r)
3354 return r;
3355 } else {
3356 r = amdgpu_discovery_set_ih_ip_blocks(adev);
3357 if (r)
3358 return r;
3359
3360 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
3361 r = amdgpu_discovery_set_psp_ip_blocks(adev);
3362 if (r)
3363 return r;
3364 }
3365 }
3366
3367 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
3368 r = amdgpu_discovery_set_smu_ip_blocks(adev);
3369 if (r)
3370 return r;
3371 }
3372
3373 r = amdgpu_discovery_set_display_ip_blocks(adev);
3374 if (r)
3375 return r;
3376
3377 r = amdgpu_discovery_set_gc_ip_blocks(adev);
3378 if (r)
3379 return r;
3380
3381 r = amdgpu_discovery_set_sdma_ip_blocks(adev);
3382 if (r)
3383 return r;
3384
3385 r = amdgpu_discovery_set_ras_ip_blocks(adev);
3386 if (r)
3387 return r;
3388
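/*
 * When firmware is loaded directly or via RLC backdoor autoload and DPM is
 * enabled, the SMU block was not added in the PSP-load path above, so add it
 * here after GC and SDMA.
 */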
3389 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
3390 !amdgpu_sriov_vf(adev) &&
3391 amdgpu_dpm == 1) ||
3392 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO &&
3393 amdgpu_dpm == 1)) {
3394 r = amdgpu_discovery_set_smu_ip_blocks(adev);
3395 if (r)
3396 return r;
3397 }
3398
3399 r = amdgpu_discovery_set_mm_ip_blocks(adev);
3400 if (r)
3401 return r;
3402
3403 r = amdgpu_discovery_set_mes_ip_blocks(adev);
3404 if (r)
3405 return r;
3406
3407 r = amdgpu_discovery_set_vpe_ip_blocks(adev);
3408 if (r)
3409 return r;
3410
3411 r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
3412 if (r)
3413 return r;
3414
3415 r = amdgpu_discovery_set_isp_ip_blocks(adev);
3416 if (r)
3417 return r;
3418 return 0;
3419 }
3420
3421