/*
 * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
#include "amdgpu_ras.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_3.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "df_v4_6_2.h"
#include "df_v4_15.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "nbio_v7_9.h"
#include "nbio_v7_11.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gmc_v12_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "nbif_v6_3_1.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "hdp_v7_0.h"
#include "nv.h"
#include "soc21.h"
#include "soc24.h"
#include "soc_v1_0.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
#include "ih_v7_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "gfx_v12_0.h"
#include "gfx_v12_1.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "sdma_v7_0.h"
#include "sdma_v7_1.h"
#include "lsdma_v6_0.h"
#include "lsdma_v7_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "vcn_v4_0_3.h"
#include "jpeg_v4_0_3.h"
#include "vcn_v4_0_5.h"
#include "jpeg_v4_0_5.h"
#include "amdgpu_vkms.h"
#include "mes_v11_0.h"
#include "mes_v12_0.h"
#include "mes_v12_1.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
#include "smuio_v14_0_2.h"
#include "smuio_v15_0_0.h"
#include "smuio_v15_0_8.h"
#include "vcn_v5_0_0.h"
#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
#include "jpeg_v5_0_1.h"
#include "jpeg_v5_3_0.h"

#include "amdgpu_ras_mgr.h"

#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif

MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");

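/*
 * Raw register offsets (in dwords) used while locating and reading the
 * discovery data, before the per-IP reg_offset[] tables have been
 * initialized from that data.
 */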
#define mmIP_DISCOVERY_VERSION	0x16A00
#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMP0_SMN_C2PMSG_33	0x16061
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID] = "MP1",
	[MP2_HWID] = "MP2",
	[THM_HWID] = "THM",
	[SMUIO_HWID] = "SMUIO",
	[FUSE_HWID] = "FUSE",
	[CLKA_HWID] = "CLKA",
	[PWR_HWID] = "PWR",
	[GC_HWID] = "GC",
	[UVD_HWID] = "UVD",
	[AUDIO_AZ_HWID] = "AUDIO_AZ",
	[ACP_HWID] = "ACP",
	[DCI_HWID] = "DCI",
	[DMU_HWID] = "DMU",
	[DCO_HWID] = "DCO",
	[DIO_HWID] = "DIO",
	[XDMA_HWID] = "XDMA",
	[DCEAZ_HWID] = "DCEAZ",
	[DAZ_HWID] = "DAZ",
	[SDPMUX_HWID] = "SDPMUX",
	[NTB_HWID] = "NTB",
	[IOHC_HWID] = "IOHC",
	[L2IMU_HWID] = "L2IMU",
	[VCE_HWID] = "VCE",
	[MMHUB_HWID] = "MMHUB",
	[ATHUB_HWID] = "ATHUB",
	[DBGU_NBIO_HWID] = "DBGU_NBIO",
	[DFX_HWID] = "DFX",
	[DBGU0_HWID] = "DBGU0",
	[DBGU1_HWID] = "DBGU1",
	[OSSSYS_HWID] = "OSSSYS",
	[HDP_HWID] = "HDP",
	[SDMA0_HWID] = "SDMA0",
	[SDMA1_HWID] = "SDMA1",
	[SDMA2_HWID] = "SDMA2",
	[SDMA3_HWID] = "SDMA3",
	[LSDMA_HWID] = "LSDMA",
	[ISP_HWID] = "ISP",
	[DBGU_IO_HWID] = "DBGU_IO",
	[DF_HWID] = "DF",
	[CLKB_HWID] = "CLKB",
	[FCH_HWID] = "FCH",
	[DFX_DAP_HWID] = "DFX_DAP",
	[L1IMU_PCIE_HWID] = "L1IMU_PCIE",
	[L1IMU_NBIF_HWID] = "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
	[L1IMU3_HWID] = "L1IMU3",
	[L1IMU4_HWID] = "L1IMU4",
	[L1IMU5_HWID] = "L1IMU5",
	[L1IMU6_HWID] = "L1IMU6",
	[L1IMU7_HWID] = "L1IMU7",
	[L1IMU8_HWID] = "L1IMU8",
	[L1IMU9_HWID] = "L1IMU9",
	[L1IMU10_HWID] = "L1IMU10",
	[L1IMU11_HWID] = "L1IMU11",
	[L1IMU12_HWID] = "L1IMU12",
	[L1IMU13_HWID] = "L1IMU13",
	[L1IMU14_HWID] = "L1IMU14",
	[L1IMU15_HWID] = "L1IMU15",
	[WAFLC_HWID] = "WAFLC",
	[FCH_USB_PD_HWID] = "FCH_USB_PD",
	[PCIE_HWID] = "PCIE",
	[PCS_HWID] = "PCS",
	[DDCL_HWID] = "DDCL",
	[SST_HWID] = "SST",
	[IOAGR_HWID] = "IOAGR",
	[NBIF_HWID] = "NBIF",
	[IOAPIC_HWID] = "IOAPIC",
	[SYSTEMHUB_HWID] = "SYSTEMHUB",
	[NTBCCP_HWID] = "NTBCCP",
	[UMC_HWID] = "UMC",
	[SATA_HWID] = "SATA",
	[USB_HWID] = "USB",
	[CCXSEC_HWID] = "CCXSEC",
	[XGMI_HWID] = "XGMI",
	[XGBE_HWID] = "XGBE",
	[MP0_HWID] = "MP0",
	[VPE_HWID] = "VPE",
	[ATU_HWID] = "ATU",
	[AIGC_HWID] = "AIGC",
};

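/*
 * Map the driver's logical HWIP enumeration onto the hardware IDs found
 * in the discovery table.  Note that both NBIO_HWIP and NBIF_HWIP
 * resolve to NBIF_HWID.
 */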
static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP] = GC_HWID,
	[HDP_HWIP] = HDP_HWID,
	[SDMA0_HWIP] = SDMA0_HWID,
	[SDMA1_HWIP] = SDMA1_HWID,
	[SDMA2_HWIP] = SDMA2_HWID,
	[SDMA3_HWIP] = SDMA3_HWID,
	[LSDMA_HWIP] = LSDMA_HWID,
	[MMHUB_HWIP] = MMHUB_HWID,
	[ATHUB_HWIP] = ATHUB_HWID,
	[NBIO_HWIP] = NBIF_HWID,
	[MP0_HWIP] = MP0_HWID,
	[MP1_HWIP] = MP1_HWID,
	[UVD_HWIP] = UVD_HWID,
	[VCE_HWIP] = VCE_HWID,
	[DF_HWIP] = DF_HWID,
	[DCE_HWIP] = DMU_HWID,
	[OSSSYS_HWIP] = OSSSYS_HWID,
	[SMUIO_HWIP] = SMUIO_HWID,
	[PWR_HWIP] = PWR_HWID,
	[NBIF_HWIP] = NBIF_HWID,
	[THM_HWIP] = THM_HWID,
	[CLK_HWIP] = CLKA_HWID,
	[UMC_HWIP] = UMC_HWID,
	[XGMI_HWIP] = XGMI_HWID,
	[DCI_HWIP] = DCI_HWID,
	[PCIE_HWIP] = PCIE_HWID,
	[VPE_HWIP] = VPE_HWID,
	[ISP_HWIP] = ISP_HWID,
	[ATU_HWIP] = ATU_HWID,
};

static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
{
	u64 tmr_offset, tmr_size, pos;
	void *discv_regn;
	int ret;

	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
	if (ret)
		return ret;

	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;

	/* This region is read-only and reserved from system use */
	discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC);
	if (discv_regn) {
		memcpy(binary, discv_regn, adev->discovery.size);
		memunmap(discv_regn);
		return 0;
	}

	return -ENOENT;
}

#define IP_DISCOVERY_V2 2
#define IP_DISCOVERY_V4 4

static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
{
	bool sz_valid = true;
	uint64_t vram_size;
	int i, ret = 0;
	u32 msg;

	if (!amdgpu_sriov_vf(adev)) {
		/* It can take up to two seconds for IFWI init to complete on some dGPUs,
		 * but generally it should be in the 60-100ms range. Normally this starts
		 * as soon as the device gets power, so by the time the OS loads it has long
		 * since completed. However, when a card is hotplugged via e.g. USB4, we need
		 * to wait for the init to complete. Once the C2PMSG is updated, we can
		 * continue.
		 */


		for (i = 0; i < 2000; i++) {
			msg = RREG32(mmMP0_SMN_C2PMSG_33);
			if (msg & 0x80000000)
				break;
			msleep(1);
		}
	}

	vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
	if (!vram_size || vram_size == U32_MAX)
		sz_valid = false;
	else
		vram_size <<= 20;

	/*
	 * If in VRAM, discovery TMR is marked for reservation. If it is in system mem,
	 * then it is not required to be reserved.
	 */
	if (sz_valid) {
		if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
			/* For SRIOV VFs with dynamic critical region enabled,
			 * we will get the IPD binary via below call.
			 * If dynamic critical is disabled, fall through to normal seq.
			 */
			if (amdgpu_virt_get_dynamic_data_info(adev,
					AMD_SRIOV_MSG_IPD_TABLE_ID, binary,
					&adev->discovery.size)) {
				dev_err(adev->dev,
					"failed to read discovery info from dynamic critical region.");
				ret = -EINVAL;
				goto exit;
			}
		} else {
			uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

			amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
						  adev->discovery.size, false);
			adev->discovery.reserve_tmr = true;
		}
	} else {
		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
	}

	if (ret)
		dev_err(adev->dev,
			"failed to read discovery info from memory, vram size read: %llx",
			vram_size);
exit:
	return ret;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
						  uint8_t *binary,
						  const char *fw_name)
{
	const struct firmware *fw;
	int r;

	r = firmware_request_nowarn(&fw, fw_name, adev->dev);
	if (r) {
		if (amdgpu_discovery == 2)
			dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
		else
			drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
	release_firmware(fw);

	return 0;
}

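/*
 * Discovery tables are protected by a simple additive checksum: the
 * 16-bit sum of every byte in the table, compared against the value
 * stored in the binary header's table_list entry.
 */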
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(struct amdgpu_device *adev,
						    uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	uint16_t calculated;

	calculated = amdgpu_discovery_calculate_checksum(data, size);

	if (calculated != expected) {
		dev_err(adev->dev, "Discovery checksum failed: calc 0x%04x != exp 0x%04x, size %u.\n",
			calculated, expected, size);
		return false;
	}

	return true;
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;

	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table of VCN config.
	 */
	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
					   struct binary_header *bhdr)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct nps_info_header *nhdr;
	struct table_info *info;
	uint16_t checksum;
	uint16_t offset;

	info = &bhdr->table_list[NPS_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	nhdr = (struct nps_info_header *)(discovery_bin + offset);

	if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
		dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
		return -EINVAL;
	}

	if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset,
					      le32_to_cpu(nhdr->size_bytes),
					      checksum)) {
		dev_dbg(adev->dev, "invalid nps info data table checksum\n");
		return -EINVAL;
	}

	return 0;
}

static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
{
	if (amdgpu_discovery == 2) {
		/* Assume there is valid discovery TMR in VRAM even if binary is sideloaded */
		adev->discovery.reserve_tmr = true;
		return "amdgpu/ip_discovery.bin";
	}

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		return "amdgpu/vega10_ip_discovery.bin";
	case CHIP_VEGA12:
		return "amdgpu/vega12_ip_discovery.bin";
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			return "amdgpu/raven2_ip_discovery.bin";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			return "amdgpu/picasso_ip_discovery.bin";
		else
			return "amdgpu/raven_ip_discovery.bin";
	case CHIP_VEGA20:
		return "amdgpu/vega20_ip_discovery.bin";
	case CHIP_ARCTURUS:
		return "amdgpu/arcturus_ip_discovery.bin";
	case CHIP_ALDEBARAN:
		return "amdgpu/aldebaran_ip_discovery.bin";
	default:
		return NULL;
	}
}

static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint8_t *discovery_bin;
	const char *fw_name;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
	if (!adev->discovery.bin)
		return -ENOMEM;
	adev->discovery.size = DISCOVERY_TMR_SIZE;
	adev->discovery.debugfs_blob.data = adev->discovery.bin;
	adev->discovery.debugfs_blob.size = adev->discovery.size;

	discovery_bin = adev->discovery.bin;
	/* Read from file if it is the preferred option */
	fw_name = amdgpu_discovery_get_fw_name(adev);
	if (fw_name != NULL) {
		drm_dbg(&adev->ddev, "use ip discovery information from file");
		r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin,
							   fw_name);
		if (r)
			goto out;
	} else {
		drm_dbg(&adev->ddev, "use ip discovery information from memory");
		r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin);
		if (r)
			goto out;
	}

	/* check the ip discovery binary signature */
	if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) {
		dev_err(adev->dev,
			"invalid ip discovery binary signature\n");
		r = -EINVAL;
		goto out;
	}

	bhdr = (struct binary_header *)discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		 sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset, size,
					      checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(discovery_bin + offset);

		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset,
						      le16_to_cpu(ihdr->size),
						      checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset,
						      le32_to_cpu(ghdr->size),
						      checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev,
						      discovery_bin + offset,
						      sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev,
						      discovery_bin + offset,
						      le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

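	/* Note: the "0 &&" below short-circuits checksum verification of the
	 * MALL table; the table itself is still parsed (without validation)
	 * later in amdgpu_discovery_get_mall_info().
	 */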
	if (0 && offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev,
						      discovery_bin + offset,
						      le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->discovery.bin);
	adev->discovery.bin = NULL;
	if ((amdgpu_discovery != 2) &&
	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
		amdgpu_ras_query_boot_status(adev, 4);
	return r;
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->discovery.bin);
	adev->discovery.bin = NULL;
}

static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
					uint8_t instance, uint16_t hw_id)
{
	if (instance >= HWIP_MAX_INSTANCE) {
		dev_err(adev->dev,
			"Unexpected instance_number (%d) from ip discovery blob\n",
			instance);
		return -EINVAL;
	}
	if (hw_id >= HW_ID_MAX) {
		dev_err(adev->dev,
			"Unexpected hw_id (%d) from ip discovery blob\n",
			hw_id);
		return -EINVAL;
	}

	return 0;
}

static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	uint16_t hw_id;
	uint8_t inst;
	int i, j;

	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header *)(discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(discovery_bin + ip_offset);
			inst = ip->number_instance;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
				goto next_ip;

			if (ip->harvest == 1) {
				switch (hw_id) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (inst == 0) {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
					} else {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
					}
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			ip_offset += struct_size(ip, base_address,
						 ip->num_base_address);
		}
	}
}

static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;
	uint32_t umc_harvest_config = 0;

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			adev->vcn.harvest_config |=
				(1 << harvest_info->list[i].number_instance);
			adev->jpeg.harvest_config |=
				(1 << harvest_info->list[i].number_instance);

			adev->vcn.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			adev->jpeg.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			umc_harvest_config |=
				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
			(*umc_harvest_count)++;
			break;
		case GC_HWID:
			adev->gfx.xcc_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case SDMA0_HWID:
			adev->sdma.sdma_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
#if defined(CONFIG_DRM_AMD_ISP)
		case ISP_HWID:
			adev->isp.harvest_config |=
				~(1U << harvest_info->list[i].number_instance);
			break;
#endif
		default:
			break;
		}
	}

	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
				~umc_harvest_config;
}

/* ================================================== */

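/*
 * The discovery information is exposed via sysfs as a tree:
 *
 *   ip_discovery/die/<die>/<hw_id>/<instance>/{hw_id,major,minor,...}
 *
 * built from the kobjects and ksets embedded in the structures below.
 */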
struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8 num_instance;
	u8 major, minor, revision;
	u8 harvest;

	int num_base_addresses;
	u32 base_addr[] __counted_by(num_base_addresses);
};

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;  /* ip_discovery/die/#die/, contains ip_hw_id */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t at;
	int ii;

	for (at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that, at + size <= PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		at += sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
	}

	return at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static const struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static const struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static const struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static const struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static const struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	kfree(ip_top);
	adev->discovery.ip_top = NULL;
}

static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
{
	uint8_t harvest = 0;

	/* Until a uniform way is figured, get mask based on hwid */
	switch (hw_id) {
	case VCN_HWID:
		/* VCN vs UVD+VCE */
		if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
			harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
		break;
	case DMU_HWID:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
			harvest = 0x1;
		break;
	case UMC_HWID:
		/* TODO: It needs another parsing; for now, ignore. */
		break;
	case GC_HWID:
		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
		break;
	case SDMA0_HWID:
		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
		break;
	default:
		break;
	}

	return harvest;
}

static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	int ii, jj, kk, res;
	uint16_t hw_id;
	uint8_t inst;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip_v4 *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip_v4 *)(discovery_bin + ip_offset);
			inst = ip->instance_number;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
			    hw_id != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc_obj(*ip_hw_id);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc_flex(*ip_hw_instance,
						      base_addr,
						      ip->num_base_address);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->instance_number;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest =
				amdgpu_discovery_get_harvest_info(
					adev, ip_hw_instance->hw_id,
					ip_hw_instance->num_instance);
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
				if (reg_base_64)
					ip_hw_instance->base_addr[kk] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
				else
					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
			}

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			if (reg_base_64)
				ip_offset += struct_size(ip, base_address_64,
							 ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address,
							 ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct ip_discovery_top *ip_top = adev->discovery.ip_top;
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header *)(discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc_obj(*ip_die_entry);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
	}

	return 0;
}

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct ip_discovery_top *ip_top;
	struct kset *die_kset;
	int res, ii;

	if (!discovery_bin)
		return -EINVAL;

	ip_top = kzalloc_obj(*ip_top);
	if (!ip_top)
		return -ENOMEM;

	ip_top->adev = adev;
	adev->discovery.ip_top = ip_top;
	res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct ip_discovery_top *ip_top = adev->discovery.ip_top;
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&ip_top->die_kset.kobj);
	kobject_put(&ip_top->kobj);
}

/* ================================================== */

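/*
 * Walk every die and IP instance in the discovery table, filling in
 * adev->reg_offset[] and adev->ip_versions[] plus the per-block
 * instance masks and counts (gfx xcc, sdma, vcn/jpeg, vpe, umc).
 */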
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	uint8_t num_base_address, subrev, variant;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	uint8_t *discovery_bin;
	struct ip_v4 *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint32_t wafl_ver;
	uint16_t num_ips;
	uint16_t hw_id;
	uint8_t inst;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r)
		return r;

	discovery_bin = adev->discovery.bin;
	wafl_ver = 0;
	adev->gfx.xcc_mask = 0;
	adev->sdma.sdma_mask = 0;
	adev->vcn.inst_mask = 0;
	adev->jpeg.inst_mask = 0;
	bhdr = (struct binary_header *)discovery_bin;
	ihdr = (struct ip_discovery_header *)(discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
				  le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
			  le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(discovery_bin + ip_offset);

			inst = ip->instance_number;
			hw_id = le16_to_cpu(ip->hw_id);
			if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->instance_number,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN functions normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				if (adev->vcn.num_vcn_inst <
				    AMDGPU_MAX_VCN_INSTANCES) {
					adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
						ip->revision & 0xc0;
					adev->vcn.num_vcn_inst++;
					adev->vcn.inst_mask |=
						(1U << ip->instance_number);
					adev->jpeg.inst_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
				}
				ip->revision &= ~0xc0;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances <
				    AMDGPU_MAX_SDMA_INSTANCES) {
					adev->sdma.num_instances++;
					adev->sdma.sdma_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
				}
			}

			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
					adev->vpe.num_instances++;
				else
					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
						adev->vpe.num_instances + 1,
						AMDGPU_MAX_VPE_INSTANCES);
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
				adev->gmc.num_umc++;
				adev->umc.node_inst_num++;
			}

			if (le16_to_cpu(ip->hw_id) == GC_HWID)
				adev->gfx.xcc_mask |=
					(1U << ip->instance_number);

			if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
				wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
							   ip->revision, 0, 0);

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				if (ihdr->base_addr_64_bit)
					/* Truncate the 64bit base address from ip discovery
					 * and only store lower 32bit ip base in reg_offset[].
					 * Bits > 32 follow an ASIC specific format, thus just
					 * discard them and handle it within the specific ASIC.
					 * This way reg_offset[] and related helpers can
					 * stay unchanged.
					 * The base address is in dwords, thus clear the
					 * highest 2 bits to store.
					 */
					ip->base_address[k] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
				else
					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
				    hw_id_map[hw_ip] != 0) {
					DRM_DEBUG("set register base offset for %s\n",
						  hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->instance_number] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example. Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
					 * example. On most chips there are multiple instances
					 * with the same HWID.
					 */

					if (ihdr->version < 3) {
						subrev = 0;
						variant = 0;
					} else {
						subrev = ip->sub_revision;
						variant = ip->variant;
					}

					adev->ip_versions[hw_ip]
							 [ip->instance_number] =
						IP_VERSION_FULL(ip->major,
								ip->minor,
								ip->revision,
								variant,
								subrev);
				}
			}

next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
		adev->ip_versions[XGMI_HWIP][0] = wafl_ver;

	return 0;
}

static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct ip_discovery_header *ihdr;
	struct binary_header *bhdr;
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;
	uint16_t offset, ihdr_ver;

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
	ihdr = (struct ip_discovery_header *)(discovery_bin + offset);
	ihdr_ver = le16_to_cpu(ihdr->version);
	/*
	 * Harvest table does not fit Navi1x and legacy GPUs,
	 * so read the harvest bit per IP data structure to set
	 * the harvest configuration.
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
	    ihdr_ver <= 2) {
		if ((adev->pdev->device == 0x731E &&
		     (adev->pdev->revision == 0xC6 ||
		      adev->pdev->revision == 0xC7)) ||
		    (adev->pdev->device == 0x7340 &&
		     adev->pdev->revision == 0xC9) ||
		    (adev->pdev->device == 0x7360 &&
		     adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
								 &vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc)
		adev->gmc.num_umc -= umc_harvest_count;
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v1_3 v1_3;
	struct gc_info_v2_0 v2;
	struct gc_info_v2_1 v2_1;
};


static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
						 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
			adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
			adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
			adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
			adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
						 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}
1743
1744 union mall_info {
1745 struct mall_info_v1_0 v1;
1746 struct mall_info_v2_0 v2;
1747 };
1748
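/* Parse the MALL (Memory Access at Last Level) info table from the
 * discovery binary. v1 sums the per-UMC MALL size across all UMC
 * instances, doubling it for instances flagged in m_s_present and
 * halving it for instances flagged in m_half_use; v2 simply multiplies
 * the per-UMC size by the UMC count.
 */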
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		adev->gmc.m_half_use = half_use;
		break;
	case 2:
		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union vcn_info {
	struct vcn_info_v1_0 v1;
};

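/* Read the per-instance fuse data from the VCN info table so the driver
 * knows which codecs are disabled on each VCN instance.
 */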
static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct binary_header *bhdr;
	union vcn_info *vcn_info;
	u16 offset;
	int v;

	if (!discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES,
	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES, but that
	 * may change in the future with new GPUs, so keep this check for
	 * defensive purposes.
	 */
	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
		dev_err(adev->dev, "invalid vcn instances\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);

	if (!offset)
		return 0;

	vcn_info = (union vcn_info *)(discovery_bin + offset);

	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
	case 1:
		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES,
		 * so this won't overflow.
		 */
		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
			adev->vcn.inst[v].vcn_codec_disable_mask =
				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled VCN info table %d.%d\n",
			le16_to_cpu(vcn_info->v1.header.version_major),
			le16_to_cpu(vcn_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union nps_info {
	struct nps_info_v1_0 v1;
};

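/* Re-read the NPS info table straight from the discovery TMR in VRAM,
 * rather than from the cached copy, and verify its checksum. Used when
 * the cached table may be stale, e.g. around an NPS mode switch.
 */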
static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
					     union nps_info *nps_data)
{
	uint64_t vram_size, pos, offset;
	struct nps_info_header *nhdr;
	struct binary_header bhdr;
	uint16_t checksum;

	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	pos = vram_size - DISCOVERY_TMR_OFFSET;
	amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);

	offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
	checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);

	amdgpu_device_vram_access(adev, (pos + offset), nps_data,
				  sizeof(*nps_data), false);

	nhdr = (struct nps_info_header *)(nps_data);
	if (!amdgpu_discovery_verify_checksum(adev, (uint8_t *)nps_data,
					      le32_to_cpu(nhdr->size_bytes),
					      checksum)) {
		dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
		return -EINVAL;
	}

	return 0;
}

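/* Report the NPS type and the memory ranges described by the NPS info
 * table, either from the cached discovery binary or, when @refresh is
 * true, from a fresh copy read back from VRAM. On success the caller
 * takes ownership of the allocated @ranges array.
 */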
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
				  uint32_t *nps_type,
				  struct amdgpu_gmc_memrange **ranges,
				  int *range_cnt, bool refresh)
{
	uint8_t *discovery_bin = adev->discovery.bin;
	struct amdgpu_gmc_memrange *mem_ranges;
	struct binary_header *bhdr;
	union nps_info *nps_info;
	union nps_info nps_data;
	u16 offset;
	int i, r;

	if (!nps_type || !range_cnt || !ranges)
		return -EINVAL;

	if (refresh) {
		r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
		if (r)
			return r;
		nps_info = &nps_data;
	} else {
		if (!discovery_bin) {
			dev_err(adev->dev,
				"fetch mem range failed, ip discovery uninitialized\n");
			return -EINVAL;
		}

		bhdr = (struct binary_header *)discovery_bin;
		offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);

		if (!offset)
			return -ENOENT;

		/* If verification fails, return as if NPS table doesn't exist */
		if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
			return -ENOENT;

		nps_info = (union nps_info *)(discovery_bin + offset);
	}

	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
	case 1:
		mem_ranges = kvzalloc_objs(*mem_ranges, nps_info->v1.count);
		if (!mem_ranges)
			return -ENOMEM;
		*nps_type = nps_info->v1.nps_type;
		*range_cnt = nps_info->v1.count;
		for (i = 0; i < *range_cnt; i++) {
			mem_ranges[i].base_address =
				nps_info->v1.instance_info[i].base_address;
			mem_ranges[i].limit_address =
				nps_info->v1.instance_info[i].limit_address;
			mem_ranges[i].nid_mask = -1;
			mem_ranges[i].flags = 0;
		}
		*ranges = mem_ranges;
		break;
	default:
		dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
			le16_to_cpu(nps_info->v1.header.version_major),
			le16_to_cpu(nps_info->v1.header.version_minor));
		return -EINVAL;
	}

	return 0;
}

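/* There is no dedicated discovery entry for the SOC-level common block,
 * so derive it (soc15/nv/soc21/...) from the GC IP version.
 */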
static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
	/* what IP to use for this? */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
		break;
	case IP_VERSION(12, 1, 0):
		amdgpu_device_ip_block_add(adev, &soc_v1_0_common_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add common ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

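/* Pick the GMC (graphics memory controller) block matching the GC/MMHUB
 * generation.
 */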
static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
	/* use GC or MMHUB IP version */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
	case IP_VERSION(12, 1, 0):
		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
		break;
	default:
		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

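/* Pick the interrupt handler (IH) block from the OSSSYS IP version. */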
static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 3, 0):
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		break;
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
		break;
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 1, 0):
		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

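/* Pick the PSP (Platform Security Processor) block from the MP0 IP
 * version.
 */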
static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(13, 0, 12):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(13, 0, 15):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	case IP_VERSION(13, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
	case IP_VERSION(14, 0, 5):
		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
		break;
	case IP_VERSION(15, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v15_0_ip_block);
		break;
	case IP_VERSION(15, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v15_0_8_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP0_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

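/* Pick the SMU power-management block from the MP1 IP version. Older
 * parts fall back to the legacy powerplay block.
 */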
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(13, 0, 12):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
	case IP_VERSION(14, 0, 4):
	case IP_VERSION(14, 0, 5):
		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
		break;
	case IP_VERSION(15, 0, 0):
		amdgpu_device_ip_block_add(adev, &smu_v15_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP1_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

#if defined(CONFIG_DRM_AMD_DC)
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);
	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
#endif

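/* Add the display block: virtual display (vkms) when requested or when
 * running as an SR-IOV VF, otherwise the DC-based DM block matching the
 * DCE/DCI IP version.
 */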
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
		case IP_VERSION(3, 5, 0):
		case IP_VERSION(3, 5, 1):
		case IP_VERSION(3, 6, 0):
		case IP_VERSION(4, 1, 0):
			/* TODO: Fix IP version. DC code expects version 4.0.1 */
			if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
				adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);

			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, DCE_HWIP, 0));
			return -EINVAL;
		}
	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, DCI_HWIP, 0));
			return -EINVAL;
		}
	}
#endif
	return 0;
}

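/* Pick the GFX (graphics core) block from the GC IP version. */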
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
		break;
	case IP_VERSION(12, 1, 0):
		amdgpu_device_ip_block_add(adev, &gfx_v12_1_ip_block);
		break;
	default:
		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

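/* Pick the SDMA engine block from the SDMA0 IP version. */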
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
	case IP_VERSION(4, 4, 4):
		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
	case IP_VERSION(6, 1, 2):
	case IP_VERSION(6, 1, 3):
	case IP_VERSION(6, 1, 4):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
		amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
		break;
	case IP_VERSION(7, 1, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v7_1_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
		return -EINVAL;
	}

	return 0;
}

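/* RAS is only a discrete IP block on a few data-center parts; everywhere
 * else this is a no-op.
 */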
static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 12):
	case IP_VERSION(13, 0, 14):
		amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block);
		break;
	default:
		break;
	}
	return 0;
}

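/* Add the multimedia blocks: UVD/VCE on parts that still expose a VCE IP
 * version, otherwise the VCN/JPEG pair matching the UVD/VCN IP version.
 */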
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, UVD_HWIP, 0));
			return -EINVAL;
		}
		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, VCE_HWIP, 0));
			return -EINVAL;
		}
	} else {
		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 2):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		case IP_VERSION(4, 0, 3):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
			break;
		case IP_VERSION(4, 0, 5):
		case IP_VERSION(4, 0, 6):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
			break;
		case IP_VERSION(5, 0, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
			break;
		case IP_VERSION(5, 3, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v5_3_0_ip_block);
			break;
		case IP_VERSION(5, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, UVD_HWIP, 0));
			return -EINVAL;
		}
	}
	return 0;
}

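/* Add the MES (MicroEngine Scheduler) block and enable MES-based queue
 * management on GFX11 and newer parts.
 */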
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		if (amdgpu_uni_mes)
			adev->enable_uni_mes = true;
		break;
	case IP_VERSION(12, 1, 0):
		amdgpu_device_ip_block_add(adev, &mes_v12_1_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		if (amdgpu_uni_mes)
			adev->enable_uni_mes = true;
		break;
	default:
		break;
	}
	return 0;
}

static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		aqua_vanjaram_init_soc_config(adev);
		break;
	case IP_VERSION(12, 1, 0):
		soc_v1_0_init_soc_config(adev);
		break;
	default:
		break;
	}
}

static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
	case IP_VERSION(6, 1, 3):
		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
		break;
	default:
		break;
	}

	return 0;
}

static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(4, 0, 5):
	case IP_VERSION(4, 0, 6):
		if (amdgpu_umsch_mm & 0x1) {
			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
			adev->enable_umsch_mm = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
{
#if defined(CONFIG_DRM_AMD_ISP)
	switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
	case IP_VERSION(4, 1, 0):
		amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
		break;
	case IP_VERSION(4, 1, 1):
		amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
		break;
	default:
		break;
	}
#endif

	return 0;
}

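/* Top-level entry point: populate the IP version table for the ASIC
 * (from hardcoded lists on pre-discovery parts, otherwise from the IP
 * discovery binary), then register every IP block the device needs in
 * initialization order.
 */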
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->sdma.sdma_mask = 3;
		adev->gmc.num_umc = 4;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->sdma.sdma_mask = 3;
		adev->gmc.num_umc = 4;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->sdma.sdma_mask = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		adev->gfx.xcc_mask = 1;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->sdma.sdma_mask = 3;
		adev->gmc.num_umc = 8;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->sdma.sdma_mask = 0xff;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		/* This is not fatal. We only need the discovery
		 * binary for sysfs. We don't need it for a
		 * functional system.
		 */
		amdgpu_discovery_init(adev);
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->sdma.sdma_mask = 0x1f;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->gfx.xcc_mask = 1;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	case CHIP_CYAN_SKILLFISH:
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
			r = amdgpu_discovery_reg_base_init(adev);
			if (r)
				return -EINVAL;

			amdgpu_discovery_harvest_ip(adev);
			amdgpu_discovery_get_gfx_info(adev);
			amdgpu_discovery_get_mall_info(adev);
			amdgpu_discovery_get_vcn_info(adev);
		} else {
			cyan_skillfish_reg_base_init(adev);
			adev->sdma.num_instances = 2;
			adev->sdma.sdma_mask = 3;
			adev->gfx.xcc_mask = 1;
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
			adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
		}
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			drm_err(&adev->ddev, "discovery failed: %d\n", r);
			return r;
		}

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

	amdgpu_discovery_init_soc_config(adev);
	amdgpu_discovery_sysfs_init(adev);

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		adev->family = AMDGPU_FAMILY_GC_11_5_0;
		break;
	case IP_VERSION(11, 5, 4):
		adev->family = AMDGPU_FAMILY_GC_11_5_4;
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
	case IP_VERSION(12, 1, 0):
		adev->family = AMDGPU_FAMILY_GC_12_0_0;
		break;
	default:
		return -EINVAL;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
	case IP_VERSION(11, 5, 4):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

	/* set NBIO version */
	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 9, 0):
	case IP_VERSION(7, 9, 1):
		adev->nbio.funcs = &nbio_v7_9_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
		break;
	case IP_VERSION(7, 11, 0):
	case IP_VERSION(7, 11, 1):
	case IP_VERSION(7, 11, 2):
	case IP_VERSION(7, 11, 3):
		adev->nbio.funcs = &nbio_v7_11_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	case IP_VERSION(6, 3, 1):
	case IP_VERSION(7, 11, 4):
		adev->nbio.funcs = &nbif_v6_3_1_funcs;
		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	case IP_VERSION(7, 0, 0):
		adev->hdp.funcs = &hdp_v7_0_funcs;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	case IP_VERSION(4, 3, 0):
		adev->df.funcs = &df_v4_3_funcs;
		break;
	case IP_VERSION(4, 6, 2):
		adev->df.funcs = &df_v4_6_2_funcs;
		break;
	case IP_VERSION(4, 15, 0):
	case IP_VERSION(4, 15, 1):
		adev->df.funcs = &df_v4_15_funcs;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 11):
		adev->smuio.funcs = &smuio_v13_0_3_funcs;
		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
			adev->flags |= AMD_IS_APU;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	case IP_VERSION(14, 0, 2):
		adev->smuio.funcs = &smuio_v14_0_2_funcs;
		break;
	case IP_VERSION(15, 0, 0):
		adev->smuio.funcs = &smuio_v15_0_0_funcs;
		break;
	case IP_VERSION(15, 0, 8):
		adev->smuio.funcs = &smuio_v15_0_8_funcs;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
		adev->lsdma.funcs = &lsdma_v7_0_funcs;
		break;
	default:
		break;
	}

	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_ras_ip_blocks(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	     !amdgpu_sriov_vf(adev) &&
	     amdgpu_dpm == 1) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO &&
	     amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_isp_ip_blocks(adev);
	if (r)
		return r;

	return 0;
}