/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amd_pcie.h"
#include "amdgpu_userq.h"

void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
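			/* replace this slot with the last entry to keep the array dense */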
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		return;

	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
		DRM_WARN("smart shift update failed\n");

	amdgpu_acpi_fini(adev);
	amdgpu_device_fini_hw(adev);
}

void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more gpu instance\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @adev: pointer to struct amdgpu_device
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
	struct drm_device *dev;
	int r, acpi_status;

	dev = adev_to_drm(adev);

	/* amdgpu_device_init() should report only fatal errors, such as
	 * memory allocation, iomapping, or memory manager initialization
	 * failures. It must properly initialize the GPU MC controller
	 * and permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, flags);
	if (r) {
		dev_err(dev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	amdgpu_device_detect_runtime_pm_mode(adev);

	/* Call ACPI methods: these require modeset init,
	 * but failure is not fatal.
	 */

	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(dev->dev, "Error during ACPI methods call\n");

	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
		DRM_WARN("smart shift update failed\n");

out:
	if (r)
		amdgpu_driver_unload_kms(dev);

	return r;
}

static enum amd_ip_block_type
amdgpu_ip_get_block_type(struct amdgpu_device *adev, uint32_t ip)
{
	enum amd_ip_block_type type;

	switch (ip) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		break;
	case AMDGPU_HW_IP_COMPUTE:
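		/* compute queues are serviced by the GFX IP block */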
		type = AMD_IP_BLOCK_TYPE_GFX;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		break;
	case AMDGPU_HW_IP_UVD:
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
		break;
	default:
		type = AMD_IP_BLOCK_TYPE_NUM;
		break;
	}

	return type;
}

static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLCP:
		fw_info->ver = adev->gfx.rlcp_ucode_version;
		fw_info->feature = adev->gfx.rlcp_ucode_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLCV:
		fw_info->ver = adev->gfx.rlcv_ucode_version;
		fw_info->feature = adev->gfx.rlcv_ucode_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		switch (query_fw->index) {
		case TA_FW_TYPE_PSP_XGMI:
			fw_info->ver = adev->psp.xgmi_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.xgmi_context.context
						.bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_RAS:
			fw_info->ver = adev->psp.ras_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.ras_context.context
						.bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_HDCP:
			fw_info->ver = adev->psp.hdcp_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.hdcp_context.context
						.bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_DTM:
			fw_info->ver = adev->psp.dtm_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.dtm_context.context
						.bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_RAP:
			fw_info->ver = adev->psp.rap_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.rap_context.context
						.bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_SECUREDISPLAY:
			fw_info->ver = adev->psp.securedisplay_context.context.bin_desc.fw_version;
			fw_info->feature =
				adev->psp.securedisplay_context.context.bin_desc
					.feature_version;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos.fw_version;
		fw_info->feature = adev->psp.sos.feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_context.bin_desc.fw_version;
		fw_info->feature = adev->psp.asd_context.bin_desc.feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_DMCUB:
		fw_info->ver = adev->dm.dmcub_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TOC:
		fw_info->ver = adev->psp.toc.fw_version;
		fw_info->feature = adev->psp.toc.feature_version;
		break;
	case AMDGPU_INFO_FW_CAP:
		fw_info->ver = adev->psp.cap_fw_version;
		fw_info->feature = adev->psp.cap_feature_version;
		break;
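	/* MES firmware reports feature and version in one packed value */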
	case AMDGPU_INFO_FW_MES_KIQ:
		fw_info->ver = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK;
		fw_info->feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK)
					>> AMDGPU_MES_FEAT_VERSION_SHIFT;
		break;
	case AMDGPU_INFO_FW_MES:
		fw_info->ver = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
		fw_info->feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK)
					>> AMDGPU_MES_FEAT_VERSION_SHIFT;
		break;
	case AMDGPU_INFO_FW_IMU:
		fw_info->ver = adev->gfx.imu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VPE:
		fw_info->ver = adev->vpe.fw_version;
		fw_info->feature = adev->vpe.feature_version;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_userq_metadata_info_gfx(struct amdgpu_device *adev,
					  struct drm_amdgpu_info *info,
					  struct drm_amdgpu_info_uq_metadata_gfx *meta)
{
	int ret = -EOPNOTSUPP;

	if (adev->gfx.funcs->get_gfx_shadow_info) {
		struct amdgpu_gfx_shadow_info shadow = {};

		adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow, true);
		meta->shadow_size = shadow.shadow_size;
		meta->shadow_alignment = shadow.shadow_alignment;
		meta->csa_size = shadow.csa_size;
		meta->csa_alignment = shadow.csa_alignment;
		ret = 0;
	}

	return ret;
}

static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready &&
			    !adev->gfx.gfx_ring[i].no_user_submission)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready &&
			    !adev->gfx.compute_ring[i].no_user_submission)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready &&
			    !adev->sdma.instance[i].ring.no_user_submission)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready &&
			    !adev->uvd.inst[i].ring.no_user_submission)
				++num_rings;
		}
		ib_start_alignment = 256;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready &&
			    !adev->vce.ring[i].no_user_submission)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready &&
				    !adev->uvd.inst[i].ring_enc[j].no_user_submission)
					++num_rings;
		}
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready &&
			    !adev->vcn.inst[i].ring_dec.no_user_submission)
				++num_rings;
		}
		ib_start_alignment = 256;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready &&
				    !adev->vcn.inst[i].ring_enc[j].no_user_submission)
					++num_rings;
		}
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
				if (adev->jpeg.inst[i].ring_dec[j].sched.ready &&
				    !adev->jpeg.inst[i].ring_dec[j].no_user_submission)
					++num_rings;
		}
		ib_start_alignment = 256;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VPE:
		type = AMD_IP_BLOCK_TYPE_VPE;
		if (adev->vpe.ring.sched.ready &&
		    !adev->vpe.ring.no_user_submission)
			++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;

	if (adev->asic_type >= CHIP_VEGA10) {
		switch (type) {
		case AMD_IP_BLOCK_TYPE_GFX:
			result->ip_discovery_version =
				IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, GC_HWIP, 0));
			break;
		case AMD_IP_BLOCK_TYPE_SDMA:
			result->ip_discovery_version =
				IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, SDMA0_HWIP, 0));
			break;
		case AMD_IP_BLOCK_TYPE_UVD:
		case AMD_IP_BLOCK_TYPE_VCN:
		case AMD_IP_BLOCK_TYPE_JPEG:
			result->ip_discovery_version =
				IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, UVD_HWIP, 0));
			break;
		case AMD_IP_BLOCK_TYPE_VCE:
			result->ip_discovery_version =
				IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VCE_HWIP, 0));
			break;
		case AMD_IP_BLOCK_TYPE_VPE:
			result->ip_discovery_version =
				IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VPE_HWIP, 0));
			break;
		default:
			result->ip_discovery_version = 0;
			break;
		}
	} else {
		result->ip_discovery_version = 0;
	}
	result->capabilities_flags = 0;
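	/* userspace expects a bitmask: bit i set means ring i accepts submissions */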
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: PCI device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	struct amdgpu_fpriv *fpriv;
	struct amdgpu_ip_block *ip_block;
	enum amd_ip_block_type type;
	struct amdgpu_xcp *xcp;
	u32 count, inst_mask;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found, ret;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		fpriv = (struct amdgpu_fpriv *)filp->driver_priv;
		type = amdgpu_ip_get_block_type(adev, info->query_hw_ip.type);
		ip_block = amdgpu_device_ip_get_ip_block(adev, type);

		if (!ip_block || !ip_block->status.valid)
			return -EINVAL;

		if (adev->xcp_mgr && adev->xcp_mgr->num_xcps > 0 &&
		    fpriv->xcp_id < adev->xcp_mgr->num_xcps) {
			xcp = &adev->xcp_mgr->xcp[fpriv->xcp_id];
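			/* on partitioned GPUs, count only the instances owned by this XCP */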
			switch (type) {
			case AMD_IP_BLOCK_TYPE_GFX:
				ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
				if (ret)
					return ret;
				count = hweight32(inst_mask);
				break;
			case AMD_IP_BLOCK_TYPE_SDMA:
				ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_SDMA, &inst_mask);
				if (ret)
					return ret;
				count = hweight32(inst_mask);
				break;
			case AMD_IP_BLOCK_TYPE_JPEG:
				ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
				if (ret)
					return ret;
				count = hweight32(inst_mask) * adev->jpeg.num_jpeg_rings;
				break;
			case AMD_IP_BLOCK_TYPE_VCN:
				ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
				if (ret)
					return ret;
				count = hweight32(inst_mask);
				break;
			default:
				return -EINVAL;
			}

			return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
		}

		switch (type) {
		case AMD_IP_BLOCK_TYPE_GFX:
		case AMD_IP_BLOCK_TYPE_VCE:
			count = 1;
			break;
		case AMD_IP_BLOCK_TYPE_SDMA:
			count = adev->sdma.num_instances;
			break;
		case AMD_IP_BLOCK_TYPE_JPEG:
			count = adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings;
			break;
		case AMD_IP_BLOCK_TYPE_VCN:
			count = adev->vcn.num_vcn_inst;
			break;
		case AMD_IP_BLOCK_TYPE_UVD:
			count = adev->uvd.num_uvd_inst;
			break;
		/* For all other IP block types not listed in the switch statement
		 * the ip status is valid here and the instance count is one.
		 */
		default:
			count = 1;
			break;
		}

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = ttm_resource_manager_usage(&adev->mman.gtt_mgr.manager);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		vram_gtt.vram_cpu_accessible_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    vram_gtt.vram_size);
		vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;
		struct ttm_resource_manager *gtt_man =
			&adev->mman.gtt_mgr.manager;
		struct ttm_resource_manager *vram_man =
			&adev->mman.vram_mgr.manager;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
			ttm_resource_manager_usage(vram_man);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    mem.vram.usable_heap_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = gtt_man->size;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		int ret = 0;
		unsigned int n, alloc_size;
		uint32_t *regs;
		unsigned int se_num = (info->read_mmr_reg.instance >>
				       AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				      AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned int sh_num = (info->read_mmr_reg.instance >>
				       AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				      AMDGPU_INFO_MMR_SH_INDEX_MASK;

		if (!down_read_trylock(&adev->reset_domain->sem))
			return -ENOENT;

		/* set full masks if userspace set all bits
		 * in the bitfields
		 */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) {
			se_num = 0xffffffff;
		} else if (se_num >= AMDGPU_GFX_MAX_SE) {
			ret = -EINVAL;
			goto out;
		}

		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) {
			sh_num = 0xffffffff;
		} else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) {
			ret = -EINVAL;
			goto out;
		}

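		/* bound the transient kernel buffer allocated below */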
		if (info->read_mmr_reg.count > 128) {
			ret = -EINVAL;
			goto out;
		}

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs) {
			ret = -ENOMEM;
			goto out;
		}

		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		amdgpu_gfx_off_ctrl(adev, false);
		for (i = 0; i < info->read_mmr_reg.count; i++) {
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				amdgpu_gfx_off_ctrl(adev, true);
				ret = -EFAULT;
				goto out;
			}
		}
		amdgpu_gfx_off_ctrl(adev, true);
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		ret = (n ? -EFAULT : 0);
out:
		up_read(&adev->reset_domain->sem);
		return ret;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device *dev_info;
		uint64_t vm_size;
		uint32_t pcie_gen_mask, pcie_width_mask;

		dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
		if (!dev_info)
			return -ENOMEM;

		dev_info->device_id = adev->pdev->device;
		dev_info->chip_rev = adev->rev_id;
		dev_info->external_rev = adev->external_rev_id;
		dev_info->pci_rev = adev->pdev->revision;
		dev_info->family = adev->family;
		dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info->gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
			dev_info->min_engine_clock = amdgpu_dpm_get_sclk(adev, true) * 10;
			dev_info->min_memory_clock = amdgpu_dpm_get_mclk(adev, true) * 10;
		} else {
			dev_info->max_engine_clock =
				dev_info->min_engine_clock =
					adev->clock.default_sclk * 10;
			dev_info->max_memory_clock =
				dev_info->min_memory_clock =
					adev->clock.default_mclk * 10;
		}
		dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info->ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (adev->gfx.mcbp)
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
		if (amdgpu_is_tmz(adev))
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
		if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;

		if (amdgpu_passthrough(adev))
			dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT <<
						AMDGPU_IDS_FLAGS_MODE_SHIFT) &
						AMDGPU_IDS_FLAGS_MODE_MASK;
		else if (amdgpu_sriov_vf(adev))
			dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_VF <<
						AMDGPU_IDS_FLAGS_MODE_SHIFT) &
						AMDGPU_IDS_FLAGS_MODE_MASK;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_TOP;

		/* Older VCE FW versions are buggy and can handle only 40 bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_BOTTOM;
		dev_info->virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info->cu_active_number = adev->gfx.cu_info.number;
		dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info->ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(dev_info->cu_bitmap));
		dev_info->vram_type = adev->gmc.vram_type;
		dev_info->vram_bit_width = adev->gmc.vram_width;
		dev_info->vce_harvest_config = adev->vce.harvest_config;
		dev_info->gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;
		dev_info->wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info->num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info->num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info->gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info->gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info->max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info->pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		/* Combine the chip gen mask with the platform (CPU/mobo) mask. */
		pcie_gen_mask = adev->pm.pcie_gen_mask &
			(adev->pm.pcie_gen_mask >> CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT);
		pcie_width_mask = adev->pm.pcie_mlw_mask &
			(adev->pm.pcie_mlw_mask >> CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT);
		dev_info->pcie_gen = fls(pcie_gen_mask);
		dev_info->pcie_num_lanes =
			pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
			pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
			pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
			pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
			pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
			pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;

		dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
		dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
		dev_info->sqc_data_cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
		dev_info->sqc_inst_cache_size = adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
		dev_info->gl1c_cache_size = adev->gfx.config.gc_gl1c_size_per_instance *
			adev->gfx.config.gc_gl1c_per_sa;
		dev_info->gl2c_cache_size = adev->gfx.config.gc_gl2c_per_gpu;
		dev_info->mall_size = adev->gmc.mall_size;

		if (adev->gfx.funcs->get_gfx_shadow_info) {
			struct amdgpu_gfx_shadow_info shadow_info;

			ret = amdgpu_gfx_get_gfx_shadow_info(adev, &shadow_info);
			if (!ret) {
				dev_info->shadow_size = shadow_info.shadow_size;
				dev_info->shadow_alignment = shadow_info.shadow_alignment;
				dev_info->csa_size = shadow_info.csa_size;
				dev_info->csa_alignment = shadow_info.csa_alignment;
			}
		}

		dev_info->userq_ip_mask = amdgpu_userq_get_supported_ip_mask(adev);

		ret = copy_to_user(out, dev_info,
				   min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
		kfree(dev_info);
		return ret;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned int i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		case AMDGPU_INFO_VBIOS_INFO: {
			struct drm_amdgpu_info_vbios vbios_info = {};
			struct atom_context *atom_context;

			atom_context = adev->mode_info.atom_context;
			if (atom_context) {
				memcpy(vbios_info.name, atom_context->name,
				       sizeof(atom_context->name));
				memcpy(vbios_info.vbios_pn, atom_context->vbios_pn,
				       sizeof(atom_context->vbios_pn));
				vbios_info.version = atom_context->version;
				memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
				       sizeof(atom_context->vbios_ver_str));
				memcpy(vbios_info.date, atom_context->date,
				       sizeof(atom_context->date));
			}

			return copy_to_user(out, &vbios_info,
					    min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting with Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
						    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_AVG_POWER,
						   (void *)&ui32, &ui32_size)) {
				/* fall back to input power for backwards compat */
				if (amdgpu_dpm_read_sensor(adev,
							   AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
							   (void *)&ui32, &ui32_size)) {
					return -EINVAL;
				}
			}
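			/* power sensor value has 8 fractional bits; keep integer watts */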
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_GPU_INPUT_POWER:
			/* get input GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK:
			/* get peak pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK:
			/* get peak pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)adev->ras_enabled << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				    min_t(u64, size, sizeof(ras_mask))) ?
				    -EFAULT : 0;
	}
	case AMDGPU_INFO_VIDEO_CAPS: {
		const struct amdgpu_video_codecs *codecs;
		struct drm_amdgpu_info_video_caps *caps;
		int r;

		if (!adev->asic_funcs->query_video_codecs)
			return -EINVAL;

		switch (info->video_cap.type) {
		case AMDGPU_INFO_VIDEO_CAPS_DECODE:
			r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
			if (r)
				return -EINVAL;
			break;
		case AMDGPU_INFO_VIDEO_CAPS_ENCODE:
			r = amdgpu_asic_query_video_codecs(adev, true, &codecs);
			if (r)
				return -EINVAL;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->video_cap.type);
			return -EINVAL;
		}

		caps = kzalloc(sizeof(*caps), GFP_KERNEL);
		if (!caps)
			return -ENOMEM;

		for (i = 0; i < codecs->codec_count; i++) {
			int idx = codecs->codec_array[i].codec_type;

			switch (idx) {
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1:
				caps->codec_info[idx].valid = 1;
				caps->codec_info[idx].max_width =
					codecs->codec_array[i].max_width;
				caps->codec_info[idx].max_height =
					codecs->codec_array[i].max_height;
				caps->codec_info[idx].max_pixels_per_frame =
					codecs->codec_array[i].max_pixels_per_frame;
				caps->codec_info[idx].max_level =
					codecs->codec_array[i].max_level;
				break;
			default:
				break;
			}
		}
		r = copy_to_user(out, caps,
				 min((size_t)size, sizeof(*caps))) ? -EFAULT : 0;
		kfree(caps);
		return r;
	}
	case AMDGPU_INFO_MAX_IBS: {
		uint32_t max_ibs[AMDGPU_HW_IP_NUM];

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
			max_ibs[i] = amdgpu_ring_max_ibs(i);

		return copy_to_user(out, max_ibs,
				    min((size_t)size, sizeof(max_ibs))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_GPUVM_FAULT: {
		struct amdgpu_fpriv *fpriv = filp->driver_priv;
		struct amdgpu_vm *vm = &fpriv->vm;
		struct drm_amdgpu_info_gpuvm_fault gpuvm_fault;
		unsigned long flags;

		if (!vm)
			return -EINVAL;

		memset(&gpuvm_fault, 0, sizeof(gpuvm_fault));

		xa_lock_irqsave(&adev->vm_manager.pasids, flags);
		gpuvm_fault.addr = vm->fault_info.addr;
		gpuvm_fault.status = vm->fault_info.status;
		gpuvm_fault.vmhub = vm->fault_info.vmhub;
		xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);

		return copy_to_user(out, &gpuvm_fault,
				    min((size_t)size, sizeof(gpuvm_fault))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_UQ_FW_AREAS: {
		struct drm_amdgpu_info_uq_metadata meta_info = {};

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			ret = amdgpu_userq_metadata_info_gfx(adev, info, &meta_info.gfx);
			if (ret)
				return ret;

			ret = copy_to_user(out, &meta_info,
					   min((size_t)size, sizeof(meta_info))) ? -EFAULT : 0;
			return ret;
		default:
			return -EINVAL;
		}
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
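
/*
 * Illustrative userspace sketch (not part of this driver): the handler above
 * is reached through the DRM_IOCTL_AMDGPU_INFO ioctl declared in
 * include/uapi/drm/amdgpu_drm.h. The caller fills a struct drm_amdgpu_info
 * with a query id and a return pointer/size pair; the device node path below
 * is an assumption for the example, and error handling is elided.
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	struct drm_amdgpu_info request = {};
 *	uint64_t gtt_usage = 0;
 *
 *	request.return_pointer = (uintptr_t)&gtt_usage;
 *	request.return_size = sizeof(gtt_usage);
 *	request.query = AMDGPU_INFO_GTT_USAGE;
 *	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) == 0)
 *		printf("GTT usage: %llu bytes\n", (unsigned long long)gtt_usage);
 */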

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		goto pm_put;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

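	/* allocate from the 16-bit PASID space; fall back to 0 (no PASID) */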
	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
	if (r)
		goto error_pasid;

	r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
	if (r)
		goto error_pasid;

	r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
	if (r)
		goto error_vm;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (adev->gfx.mcbp) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	r = amdgpu_seq64_map(adev, &fpriv->vm, &fpriv->seq64_va);
	if (r)
		goto error_vm;

	mutex_init(&fpriv->bo_list_lock);
	idr_init_base(&fpriv->bo_list_handles, 1);

	r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
	if (r)
		DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");

	r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
	if (r)
		goto error_vm;

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid) {
		amdgpu_pasid_free(pasid);
		amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
	}

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
pm_put:
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	u32 pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	if (fpriv->csa_va) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		WARN_ON(amdgpu_unmap_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
						fpriv->csa_va, csa_addr));
		fpriv->csa_va = NULL;
	}

	amdgpu_seq64_unmap(adev, fpriv);

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.bo);
	if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
		amdgpu_vm_bo_del(adev, fpriv->prt_va);
		amdgpu_bo_unreserve(pd);
	}

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

void amdgpu_driver_release_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	amdgpu_device_fini_sw(adev);
	pci_set_drvdata(adev->pdev, NULL);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
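	 * (For example, a readout taken a few lines into vblank sees
	 * vpos >= 0 and a raw count of N, and reports N + 1; after vsync
	 * starts, vpos goes negative and the already-incremented raw count
	 * is returned unmodified.)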
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		     (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret, i;

	static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type
		TA_FW_NAME(XGMI),
		TA_FW_NAME(RAS),
		TA_FW_NAME(HDCP),
		TA_FW_NAME(DTM),
		TA_FW_NAME(RAP),
		TA_FW_NAME(SECUREDISPLAY),
#undef TA_FW_NAME
	};

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLCP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLCP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLCV */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLCV feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->gfx.mec2_fw) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* IMU */
	query_fw.fw_type = AMDGPU_INFO_FW_IMU;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "IMU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = TA_FW_TYPE_PSP_XGMI; i < TA_FW_TYPE_MAX_INDEX; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;

		seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
			   ta_fw_name[i], fw_info.feature, fw_info.ver);
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	smu_program = (fw_info.ver >> 24) & 0xff;
	smu_major = (fw_info.ver >> 16) & 0xff;
	smu_minor = (fw_info.ver >> 8) & 0xff;
	smu_debug = (fw_info.ver >> 0) & 0xff;
	seq_printf(m, "SMC feature version: %u, program: %d, firmware version: 0x%08x (%d.%d.%d)\n",
		   fw_info.feature, smu_program, fw_info.ver, smu_major, smu_minor, smu_debug);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCUB */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* TOC */
	query_fw.fw_type = AMDGPU_INFO_FW_TOC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CAP */
	if (adev->psp.cap_fw) {
		query_fw.fw_type = AMDGPU_INFO_FW_CAP;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "CAP feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* MES_KIQ */
	query_fw.fw_type = AMDGPU_INFO_FW_MES_KIQ;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MES_KIQ feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MES */
	query_fw.fw_type = AMDGPU_INFO_FW_MES;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* VPE */
	query_fw.fw_type = AMDGPU_INFO_FW_VPE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VPE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_pn);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_firmware_info);

#endif

void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

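	/* exposed read-only at <debugfs>/dri/<minor>/amdgpu_firmware_info */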
	debugfs_create_file("amdgpu_firmware_info", 0444, root,
			    adev, &amdgpu_debugfs_firmware_info_fops);

#endif
}