/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
    struct amdgpu_gpu_instance *gpu_instance;
    int i;

    mutex_lock(&mgpu_info.mutex);

    for (i = 0; i < mgpu_info.num_gpu; i++) {
        gpu_instance = &(mgpu_info.gpu_ins[i]);
        if (gpu_instance->adev == adev) {
            mgpu_info.gpu_ins[i] =
                mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
            mgpu_info.num_gpu--;
            if (adev->flags & AMD_IS_APU)
                mgpu_info.num_apu--;
            else
                mgpu_info.num_dgpu--;
            break;
        }
    }

    mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * Returns 0 on success.
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
    struct amdgpu_device *adev = dev->dev_private;

    if (adev == NULL)
        return;

    amdgpu_unregister_gpu_instance(adev);

    if (adev->rmmio == NULL)
        goto done_free;

    if (amdgpu_sriov_vf(adev))
        amdgpu_virt_request_full_gpu(adev, false);

    if (amdgpu_device_is_px(dev)) {
        pm_runtime_get_sync(dev->dev);
        pm_runtime_forbid(dev->dev);
    }

    amdgpu_acpi_fini(adev);

    amdgpu_device_fini(adev);

done_free:
    kfree(adev);
    dev->dev_private = NULL;
}

static void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
    struct amdgpu_gpu_instance *gpu_instance;

    mutex_lock(&mgpu_info.mutex);

    if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
        DRM_ERROR("Cannot register more gpu instance\n");
        mutex_unlock(&mgpu_info.mutex);
        return;
    }

    gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
    gpu_instance->adev = adev;
    gpu_instance->mgpu_fan_enabled = 0;

    mgpu_info.num_gpu++;
    if (adev->flags & AMD_IS_APU)
        mgpu_info.num_apu++;
    else
        mgpu_info.num_dgpu++;

    mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
    struct amdgpu_device *adev;
    int r, acpi_status;

#ifdef CONFIG_DRM_AMDGPU_SI
    if (!amdgpu_si_support) {
        switch (flags & AMD_ASIC_MASK) {
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
        case CHIP_HAINAN:
            dev_info(dev->dev,
                     "SI support provided by radeon.\n");
            dev_info(dev->dev,
                     "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n");
            return -ENODEV;
        }
    }
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
    if (!amdgpu_cik_support) {
        switch (flags & AMD_ASIC_MASK) {
        case CHIP_KAVERI:
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KABINI:
        case CHIP_MULLINS:
            dev_info(dev->dev,
                     "CIK support provided by radeon.\n");
            dev_info(dev->dev,
                     "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n");
            return -ENODEV;
        }
    }
#endif

    adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
    if (adev == NULL)
        return -ENOMEM;

    dev->dev_private = (void *)adev;

    if ((amdgpu_runtime_pm != 0) &&
        amdgpu_has_atpx() &&
        (amdgpu_is_atpx_hybrid() ||
         amdgpu_has_atpx_dgpu_power_cntl()) &&
        ((flags & AMD_IS_APU) == 0) &&
        !pci_is_thunderbolt_attached(dev->pdev))
        flags |= AMD_IS_PX;

    /* amdgpu_device_init should report only fatal error
     * like memory allocation failure or iomapping failure,
     * or memory manager initialization failure, it must
     * properly initialize the GPU MC controller and permit
     * VRAM allocation
     */
    r = amdgpu_device_init(adev, dev, dev->pdev, flags);
    if (r) {
        dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
        goto out;
    }

    /* Call ACPI methods: require modeset init
     * but failure is not fatal
     */
    if (!r) {
        acpi_status = amdgpu_acpi_init(adev);
        if (acpi_status)
            dev_dbg(&dev->pdev->dev,
                    "Error during ACPI methods call\n");
    }

    if (amdgpu_device_is_px(dev)) {
        dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
        pm_runtime_use_autosuspend(dev->dev);
        pm_runtime_set_autosuspend_delay(dev->dev, 5000);
        pm_runtime_set_active(dev->dev);
        pm_runtime_allow(dev->dev);
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
    }
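    /*
     * Record this device in the global mgpu_info table (see
     * amdgpu_register_gpu_instance() above) so multi-GPU wide state, such
     * as the per-instance mgpu_fan_enabled flag, is tracked for every
     * probed adapter.
     */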
    amdgpu_register_gpu_instance(adev);
out:
    if (r) {
        /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
        if (adev->rmmio && amdgpu_device_is_px(dev))
            pm_runtime_put_noidle(dev->dev);
        amdgpu_driver_unload_kms(dev);
    }

    return r;
}

static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
                                struct drm_amdgpu_query_fw *query_fw,
                                struct amdgpu_device *adev)
{
    switch (query_fw->fw_type) {
    case AMDGPU_INFO_FW_VCE:
        fw_info->ver = adev->vce.fw_version;
        fw_info->feature = adev->vce.fb_version;
        break;
    case AMDGPU_INFO_FW_UVD:
        fw_info->ver = adev->uvd.fw_version;
        fw_info->feature = 0;
        break;
    case AMDGPU_INFO_FW_VCN:
        fw_info->ver = adev->vcn.fw_version;
        fw_info->feature = 0;
        break;
    case AMDGPU_INFO_FW_GMC:
        fw_info->ver = adev->gmc.fw_version;
        fw_info->feature = 0;
        break;
    case AMDGPU_INFO_FW_GFX_ME:
        fw_info->ver = adev->gfx.me_fw_version;
        fw_info->feature = adev->gfx.me_feature_version;
        break;
    case AMDGPU_INFO_FW_GFX_PFP:
        fw_info->ver = adev->gfx.pfp_fw_version;
        fw_info->feature = adev->gfx.pfp_feature_version;
        break;
    case AMDGPU_INFO_FW_GFX_CE:
        fw_info->ver = adev->gfx.ce_fw_version;
        fw_info->feature = adev->gfx.ce_feature_version;
        break;
    case AMDGPU_INFO_FW_GFX_RLC:
        fw_info->ver = adev->gfx.rlc_fw_version;
        fw_info->feature = adev->gfx.rlc_feature_version;
        break;
    case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
        fw_info->ver = adev->gfx.rlc_srlc_fw_version;
        fw_info->feature = adev->gfx.rlc_srlc_feature_version;
        break;
    case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
        fw_info->ver = adev->gfx.rlc_srlg_fw_version;
        fw_info->feature = adev->gfx.rlc_srlg_feature_version;
        break;
    case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
        fw_info->ver = adev->gfx.rlc_srls_fw_version;
        fw_info->feature = adev->gfx.rlc_srls_feature_version;
        break;
    case AMDGPU_INFO_FW_GFX_MEC:
        if (query_fw->index == 0) {
            fw_info->ver = adev->gfx.mec_fw_version;
            fw_info->feature = adev->gfx.mec_feature_version;
        } else if (query_fw->index == 1) {
            fw_info->ver = adev->gfx.mec2_fw_version;
            fw_info->feature = adev->gfx.mec2_feature_version;
        } else
            return -EINVAL;
        break;
    case AMDGPU_INFO_FW_SMC:
        fw_info->ver = adev->pm.fw_version;
        fw_info->feature = 0;
        break;
    case AMDGPU_INFO_FW_TA:
        if (query_fw->index > 1)
            return -EINVAL;
        if (query_fw->index == 0) {
            fw_info->ver = adev->psp.ta_fw_version;
            fw_info->feature = adev->psp.ta_xgmi_ucode_version;
        } else {
            fw_info->ver = adev->psp.ta_fw_version;
            fw_info->feature = adev->psp.ta_ras_ucode_version;
        }
        break;
    case AMDGPU_INFO_FW_SDMA:
        if (query_fw->index >= adev->sdma.num_instances)
            return -EINVAL;
        fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
        fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
        break;
    case AMDGPU_INFO_FW_SOS:
        fw_info->ver = adev->psp.sos_fw_version;
        fw_info->feature = adev->psp.sos_feature_version;
        break;
    case AMDGPU_INFO_FW_ASD:
        fw_info->ver = adev->psp.asd_fw_version;
        fw_info->feature = adev->psp.asd_feature_version;
        break;
    case AMDGPU_INFO_FW_DMCU:
        fw_info->ver = adev->dm.dmcu_fw_version;
        fw_info->feature = 0;
        break;
    default:
        return -EINVAL;
    }
    return 0;
}

static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
                             struct drm_amdgpu_info *info,
                             struct drm_amdgpu_info_hw_ip *result)
{
    uint32_t ib_start_alignment = 0;
    uint32_t ib_size_alignment = 0;
    enum amd_ip_block_type type;
    unsigned int num_rings = 0;
    unsigned int i, j;

    if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
        return -EINVAL;

    switch (info->query_hw_ip.type) {
    case AMDGPU_HW_IP_GFX:
        type = AMD_IP_BLOCK_TYPE_GFX;
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
            if (adev->gfx.gfx_ring[i].sched.ready)
                ++num_rings;
        ib_start_alignment = 32;
        ib_size_alignment = 32;
        break;
    case AMDGPU_HW_IP_COMPUTE:
        type = AMD_IP_BLOCK_TYPE_GFX;
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
            if (adev->gfx.compute_ring[i].sched.ready)
                ++num_rings;
        ib_start_alignment = 32;
        ib_size_alignment = 32;
        break;
    case AMDGPU_HW_IP_DMA:
        type = AMD_IP_BLOCK_TYPE_SDMA;
        for (i = 0; i < adev->sdma.num_instances; i++)
            if (adev->sdma.instance[i].ring.sched.ready)
                ++num_rings;
        ib_start_alignment = 256;
        ib_size_alignment = 4;
        break;
    case AMDGPU_HW_IP_UVD:
        type = AMD_IP_BLOCK_TYPE_UVD;
        for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
            if (adev->uvd.harvest_config & (1 << i))
                continue;

            if (adev->uvd.inst[i].ring.sched.ready)
                ++num_rings;
        }
        ib_start_alignment = 64;
        ib_size_alignment = 64;
        break;
    case AMDGPU_HW_IP_VCE:
        type = AMD_IP_BLOCK_TYPE_VCE;
        for (i = 0; i < adev->vce.num_rings; i++)
            if (adev->vce.ring[i].sched.ready)
                ++num_rings;
        ib_start_alignment = 4;
        ib_size_alignment = 1;
        break;
    case AMDGPU_HW_IP_UVD_ENC:
        type = AMD_IP_BLOCK_TYPE_UVD;
        for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
            if (adev->uvd.harvest_config & (1 << i))
                continue;

            for (j = 0; j < adev->uvd.num_enc_rings; j++)
                if (adev->uvd.inst[i].ring_enc[j].sched.ready)
                    ++num_rings;
        }
        ib_start_alignment = 64;
        ib_size_alignment = 64;
        break;
    case AMDGPU_HW_IP_VCN_DEC:
        type = AMD_IP_BLOCK_TYPE_VCN;
        if (adev->vcn.ring_dec.sched.ready)
            ++num_rings;
        ib_start_alignment = 16;
        ib_size_alignment = 16;
        break;
    case AMDGPU_HW_IP_VCN_ENC:
        type = AMD_IP_BLOCK_TYPE_VCN;
        for (i = 0; i < adev->vcn.num_enc_rings; i++)
            if (adev->vcn.ring_enc[i].sched.ready)
                ++num_rings;
        ib_start_alignment = 64;
        ib_size_alignment = 1;
        break;
    case AMDGPU_HW_IP_VCN_JPEG:
        type = AMD_IP_BLOCK_TYPE_VCN;
        if (adev->vcn.ring_jpeg.sched.ready)
            ++num_rings;
        ib_start_alignment = 16;
        ib_size_alignment = 16;
        break;
    default:
        return -EINVAL;
    }

    for (i = 0; i < adev->num_ip_blocks; i++)
        if (adev->ip_blocks[i].version->type == type &&
            adev->ip_blocks[i].status.valid)
            break;

    if (i == adev->num_ip_blocks)
        return 0;

    num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
                    num_rings);
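    /*
     * available_rings below is reported to userspace as a bitmask with the
     * low num_rings bits set, i.e. the first num_rings rings of this IP
     * type (clamped to the context entity count above) are usable.
     */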
    result->hw_ip_version_major = adev->ip_blocks[i].version->major;
    result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
    result->capabilities_flags = 0;
    result->available_rings = (1 << num_rings) - 1;
    result->ib_start_alignment = ib_start_alignment;
    result->ib_size_alignment = ib_size_alignment;
    return 0;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @adev: amdgpu device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
    struct amdgpu_device *adev = dev->dev_private;
    struct drm_amdgpu_info *info = data;
    struct amdgpu_mode_info *minfo = &adev->mode_info;
    void __user *out = (void __user *)(uintptr_t)info->return_pointer;
    uint32_t size = info->return_size;
    struct drm_crtc *crtc;
    uint32_t ui32 = 0;
    uint64_t ui64 = 0;
    int i, found;
    int ui32_size = sizeof(ui32);

    if (!info->return_size || !info->return_pointer)
        return -EINVAL;

    switch (info->query) {
    case AMDGPU_INFO_ACCEL_WORKING:
        ui32 = adev->accel_working;
        return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
    case AMDGPU_INFO_CRTC_FROM_ID:
        for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
            crtc = (struct drm_crtc *)minfo->crtcs[i];
            if (crtc && crtc->base.id == info->mode_crtc.id) {
                struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
                ui32 = amdgpu_crtc->crtc_id;
                found = 1;
                break;
            }
        }
        if (!found) {
            DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
            return -EINVAL;
        }
        return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
    case AMDGPU_INFO_HW_IP_INFO: {
        struct drm_amdgpu_info_hw_ip ip = {};
        int ret;

        ret = amdgpu_hw_ip_info(adev, info, &ip);
        if (ret)
            return ret;

        ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
        return ret ? -EFAULT : 0;
    }
    case AMDGPU_INFO_HW_IP_COUNT: {
        enum amd_ip_block_type type;
        uint32_t count = 0;

        switch (info->query_hw_ip.type) {
        case AMDGPU_HW_IP_GFX:
            type = AMD_IP_BLOCK_TYPE_GFX;
            break;
        case AMDGPU_HW_IP_COMPUTE:
            type = AMD_IP_BLOCK_TYPE_GFX;
            break;
        case AMDGPU_HW_IP_DMA:
            type = AMD_IP_BLOCK_TYPE_SDMA;
            break;
        case AMDGPU_HW_IP_UVD:
            type = AMD_IP_BLOCK_TYPE_UVD;
            break;
        case AMDGPU_HW_IP_VCE:
            type = AMD_IP_BLOCK_TYPE_VCE;
            break;
        case AMDGPU_HW_IP_UVD_ENC:
            type = AMD_IP_BLOCK_TYPE_UVD;
            break;
        case AMDGPU_HW_IP_VCN_DEC:
        case AMDGPU_HW_IP_VCN_ENC:
        case AMDGPU_HW_IP_VCN_JPEG:
            type = AMD_IP_BLOCK_TYPE_VCN;
            break;
        default:
            return -EINVAL;
        }

        for (i = 0; i < adev->num_ip_blocks; i++)
            if (adev->ip_blocks[i].version->type == type &&
                adev->ip_blocks[i].status.valid &&
                count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
                count++;

        return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
    }
    case AMDGPU_INFO_TIMESTAMP:
        ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
        return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
    case AMDGPU_INFO_FW_VERSION: {
        struct drm_amdgpu_info_firmware fw_info;
        int ret;

        /* We only support one instance of each IP block right now. */
        if (info->query_fw.ip_instance != 0)
            return -EINVAL;

        ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
        if (ret)
            return ret;

        return copy_to_user(out, &fw_info,
                            min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
    }
    case AMDGPU_INFO_NUM_BYTES_MOVED:
        ui64 = atomic64_read(&adev->num_bytes_moved);
        return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
    case AMDGPU_INFO_NUM_EVICTIONS:
        ui64 = atomic64_read(&adev->num_evictions);
        return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
    case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
        ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
        return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
    case AMDGPU_INFO_VRAM_USAGE:
        ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
        return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
    case AMDGPU_INFO_VIS_VRAM_USAGE:
        ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
        return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
    case AMDGPU_INFO_GTT_USAGE:
        ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
        return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
    case AMDGPU_INFO_GDS_CONFIG: {
        struct drm_amdgpu_info_gds gds_info;

        memset(&gds_info, 0, sizeof(gds_info));
        gds_info.compute_partition_size = adev->gds.gds_size;
        gds_info.gds_total_size = adev->gds.gds_size;
        gds_info.gws_per_compute_partition = adev->gds.gws_size;
        gds_info.oa_per_compute_partition = adev->gds.oa_size;
        return copy_to_user(out, &gds_info,
                            min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
    }
    case AMDGPU_INFO_VRAM_GTT: {
        struct drm_amdgpu_info_vram_gtt vram_gtt;

        vram_gtt.vram_size = adev->gmc.real_vram_size -
            atomic64_read(&adev->vram_pin_size);
        vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
            atomic64_read(&adev->visible_pin_size);
        vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
        vram_gtt.gtt_size *= PAGE_SIZE;
        vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
        return copy_to_user(out, &vram_gtt,
                            min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
    }
    case AMDGPU_INFO_MEMORY: {
        struct drm_amdgpu_memory_info mem;

        memset(&mem, 0, sizeof(mem));
        mem.vram.total_heap_size = adev->gmc.real_vram_size;
        mem.vram.usable_heap_size = adev->gmc.real_vram_size -
            atomic64_read(&adev->vram_pin_size);
        mem.vram.heap_usage =
            amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
        mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

        mem.cpu_accessible_vram.total_heap_size =
            adev->gmc.visible_vram_size;
        mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
            atomic64_read(&adev->visible_pin_size);
        mem.cpu_accessible_vram.heap_usage =
            amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
        mem.cpu_accessible_vram.max_allocation =
            mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

        mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
        mem.gtt.total_heap_size *= PAGE_SIZE;
        mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
            atomic64_read(&adev->gart_pin_size);
        mem.gtt.heap_usage =
            amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
        mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

        return copy_to_user(out, &mem,
                            min((size_t)size, sizeof(mem))) ? -EFAULT : 0;
    }
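    /*
     * The register reads below go through amdgpu_asic_read_register(),
     * which only services an ASIC-specific allow-list of registers;
     * anything else fails with the "unallowed offset" debug message
     * further down. se_num/sh_num are unpacked from read_mmr_reg.instance
     * using the AMDGPU_INFO_MMR_SE/SH_INDEX shift and mask fields.
     */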
    case AMDGPU_INFO_READ_MMR_REG: {
        unsigned n, alloc_size;
        uint32_t *regs;
        unsigned se_num = (info->read_mmr_reg.instance >>
                           AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
                          AMDGPU_INFO_MMR_SE_INDEX_MASK;
        unsigned sh_num = (info->read_mmr_reg.instance >>
                           AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
                          AMDGPU_INFO_MMR_SH_INDEX_MASK;

        /* set full masks if the userspace set all bits
         * in the bitfields */
        if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
            se_num = 0xffffffff;
        if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
            sh_num = 0xffffffff;

        regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
        if (!regs)
            return -ENOMEM;
        alloc_size = info->read_mmr_reg.count * sizeof(*regs);

        for (i = 0; i < info->read_mmr_reg.count; i++)
            if (amdgpu_asic_read_register(adev, se_num, sh_num,
                                          info->read_mmr_reg.dword_offset + i,
                                          &regs[i])) {
                DRM_DEBUG_KMS("unallowed offset %#x\n",
                              info->read_mmr_reg.dword_offset + i);
                kfree(regs);
                return -EFAULT;
            }
        n = copy_to_user(out, regs, min(size, alloc_size));
        kfree(regs);
        return n ? -EFAULT : 0;
    }
    case AMDGPU_INFO_DEV_INFO: {
        struct drm_amdgpu_info_device dev_info = {};
        uint64_t vm_size;

        dev_info.device_id = dev->pdev->device;
        dev_info.chip_rev = adev->rev_id;
        dev_info.external_rev = adev->external_rev_id;
        dev_info.pci_rev = dev->pdev->revision;
        dev_info.family = adev->family;
        dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
        dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
        /* return all clocks in KHz */
        dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
        if (adev->pm.dpm_enabled) {
            dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
            dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
        } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
                   adev->virt.ops->get_pp_clk) {
            dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
            dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
        } else {
            dev_info.max_engine_clock = adev->clock.default_sclk * 10;
            dev_info.max_memory_clock = adev->clock.default_mclk * 10;
        }
        dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
        dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
            adev->gfx.config.max_shader_engines;
        dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
        dev_info._pad = 0;
        dev_info.ids_flags = 0;
        if (adev->flags & AMD_IS_APU)
            dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
        if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
            dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

        vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
        vm_size -= AMDGPU_VA_RESERVED_SIZE;

        /* Older VCE FW versions are buggy and can handle only 40bits */
        if (adev->vce.fw_version &&
            adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
            vm_size = min(vm_size, 1ULL << 40);

        dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
        dev_info.virtual_address_max =
            min(vm_size, AMDGPU_GMC_HOLE_START);

        if (vm_size > AMDGPU_GMC_HOLE_START) {
            dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
            dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
        }
        dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
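        /*
         * pte_fragment_size is (1 << vm_manager.fragment_size) GPU pages;
         * e.g. a fragment_size of 9 with 4 KiB GPU pages gives userspace
         * a 2 MiB fragment hint.
         */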
        dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) *
            AMDGPU_GPU_PAGE_SIZE;
        dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
        dev_info.cu_active_number = adev->gfx.cu_info.number;
        dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
        dev_info.ce_ram_size = adev->gfx.ce_ram_size;
        memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
               sizeof(adev->gfx.cu_info.ao_cu_bitmap));
        memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
               sizeof(adev->gfx.cu_info.bitmap));
        dev_info.vram_type = adev->gmc.vram_type;
        dev_info.vram_bit_width = adev->gmc.vram_width;
        dev_info.vce_harvest_config = adev->vce.harvest_config;
        dev_info.gc_double_offchip_lds_buf =
            adev->gfx.config.double_offchip_lds_buf;

        if (amdgpu_ngg) {
            dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
            dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
            dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
            dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
            dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
            dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
            dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
            dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
        }
        dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
        dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
        dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
        dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
        dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
        dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
        dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

        if (adev->family >= AMDGPU_FAMILY_NV)
            dev_info.pa_sc_tile_steering_override =
                adev->gfx.config.pa_sc_tile_steering_override;

        return copy_to_user(out, &dev_info,
                            min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
    }
    case AMDGPU_INFO_VCE_CLOCK_TABLE: {
        unsigned i;
        struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
        struct amd_vce_state *vce_state;

        for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
            vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
            if (vce_state) {
                vce_clk_table.entries[i].sclk = vce_state->sclk;
                vce_clk_table.entries[i].mclk = vce_state->mclk;
                vce_clk_table.entries[i].eclk = vce_state->evclk;
                vce_clk_table.num_valid_entries++;
            }
        }

        return copy_to_user(out, &vce_clk_table,
                            min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
    }
    case AMDGPU_INFO_VBIOS: {
        uint32_t bios_size = adev->bios_size;

        switch (info->vbios_info.type) {
        case AMDGPU_INFO_VBIOS_SIZE:
            return copy_to_user(out, &bios_size,
                                min((size_t)size, sizeof(bios_size))) ? -EFAULT : 0;
        case AMDGPU_INFO_VBIOS_IMAGE: {
            uint8_t *bios;
            uint32_t bios_offset = info->vbios_info.offset;

            if (bios_offset >= bios_size)
                return -EINVAL;

            bios = adev->bios + bios_offset;
            return copy_to_user(out, bios,
                                min((size_t)size, (size_t)(bios_size - bios_offset))) ?
                                -EFAULT : 0;
        }
        default:
            DRM_DEBUG_KMS("Invalid request %d\n",
                          info->vbios_info.type);
            return -EINVAL;
        }
    }
    case AMDGPU_INFO_NUM_HANDLES: {
        struct drm_amdgpu_info_num_handles handle;

        switch (info->query_hw_ip.type) {
        case AMDGPU_HW_IP_UVD:
            /* Starting Polaris, we support unlimited UVD handles */
            if (adev->asic_type < CHIP_POLARIS10) {
                handle.uvd_max_handles = adev->uvd.max_handles;
                handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

                return copy_to_user(out, &handle,
                                    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
            } else {
                return -ENODATA;
            }

            break;
        default:
            return -EINVAL;
        }
    }
    case AMDGPU_INFO_SENSOR: {
        if (!adev->pm.dpm_enabled)
            return -ENOENT;

        switch (info->sensor_info.type) {
        case AMDGPU_INFO_SENSOR_GFX_SCLK:
            /* get sclk in Mhz */
            if (amdgpu_dpm_read_sensor(adev,
                                       AMDGPU_PP_SENSOR_GFX_SCLK,
                                       (void *)&ui32, &ui32_size)) {
                return -EINVAL;
            }
            ui32 /= 100;
            break;
        case AMDGPU_INFO_SENSOR_GFX_MCLK:
            /* get mclk in Mhz */
            if (amdgpu_dpm_read_sensor(adev,
                                       AMDGPU_PP_SENSOR_GFX_MCLK,
                                       (void *)&ui32, &ui32_size)) {
                return -EINVAL;
            }
            ui32 /= 100;
            break;
        case AMDGPU_INFO_SENSOR_GPU_TEMP:
            /* get temperature in millidegrees C */
            if (amdgpu_dpm_read_sensor(adev,
                                       AMDGPU_PP_SENSOR_GPU_TEMP,
                                       (void *)&ui32, &ui32_size)) {
                return -EINVAL;
            }
            break;
        case AMDGPU_INFO_SENSOR_GPU_LOAD:
            /* get GPU load */
            if (amdgpu_dpm_read_sensor(adev,
                                       AMDGPU_PP_SENSOR_GPU_LOAD,
                                       (void *)&ui32, &ui32_size)) {
                return -EINVAL;
            }
            break;
        case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
            /* get average GPU power */
            if (amdgpu_dpm_read_sensor(adev,
                                       AMDGPU_PP_SENSOR_GPU_POWER,
                                       (void *)&ui32, &ui32_size)) {
                return -EINVAL;
            }
            ui32 >>= 8;
            break;
        case AMDGPU_INFO_SENSOR_VDDNB:
            /* get VDDNB in millivolts */
            if (amdgpu_dpm_read_sensor(adev,
                                       AMDGPU_PP_SENSOR_VDDNB,
                                       (void *)&ui32, &ui32_size)) {
                return -EINVAL;
            }
            break;
        case AMDGPU_INFO_SENSOR_VDDGFX:
            /* get VDDGFX in millivolts */
            if (amdgpu_dpm_read_sensor(adev,
                                       AMDGPU_PP_SENSOR_VDDGFX,
                                       (void *)&ui32, &ui32_size)) {
                return -EINVAL;
            }
            break;
        case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
            /* get stable pstate sclk in Mhz */
            if (amdgpu_dpm_read_sensor(adev,
                                       AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
                                       (void *)&ui32, &ui32_size)) {
                return -EINVAL;
            }
            ui32 /= 100;
            break;
        case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
            /* get stable pstate mclk in Mhz */
            if (amdgpu_dpm_read_sensor(adev,
                                       AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
                                       (void *)&ui32, &ui32_size)) {
                return -EINVAL;
            }
            ui32 /= 100;
            break;
        default:
            DRM_DEBUG_KMS("Invalid request %d\n",
                          info->sensor_info.type);
            return -EINVAL;
        }
        return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
    }
    case AMDGPU_INFO_VRAM_LOST_COUNTER:
        ui32 = atomic_read(&adev->vram_lost_counter);
        return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
    case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
        uint64_t ras_mask;

        if (!ras)
            return -EINVAL;
        ras_mask = (uint64_t)ras->supported << 32 | ras->features;

        return copy_to_user(out, &ras_mask,
                            min_t(u64, size, sizeof(ras_mask))) ?
                            -EFAULT : 0;
    }
    default:
        DRM_DEBUG_KMS("Invalid request %d\n", info->query);
        return -EINVAL;
    }
    return 0;
}
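/*
 * Illustrative userspace view (not part of the kernel build): these queries
 * are normally reached through libdrm's amdgpu wrappers rather than by
 * issuing DRM_IOCTL_AMDGPU_INFO by hand, roughly:
 *
 *	amdgpu_device_handle handle;
 *	struct amdgpu_gpu_info gpu_info;
 *	uint32_t major, minor;
 *
 *	amdgpu_device_initialize(fd, &major, &minor, &handle);
 *	amdgpu_query_gpu_info(handle, &gpu_info);
 *
 * which ends up in amdgpu_info_ioctl() above with queries such as
 * AMDGPU_INFO_DEV_INFO and AMDGPU_INFO_FW_VERSION.
 */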
/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
    drm_fb_helper_lastclose(dev);
    vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
    struct amdgpu_device *adev = dev->dev_private;
    struct amdgpu_fpriv *fpriv;
    int r, pasid;

    /* Ensure IB tests are run on ring */
    flush_delayed_work(&adev->delayed_init_work);

    file_priv->driver_priv = NULL;

    r = pm_runtime_get_sync(dev->dev);
    if (r < 0)
        return r;

    fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
    if (unlikely(!fpriv)) {
        r = -ENOMEM;
        goto out_suspend;
    }

    pasid = amdgpu_pasid_alloc(16);
    if (pasid < 0) {
        dev_warn(adev->dev, "No more PASIDs available!");
        pasid = 0;
    }
    r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
    if (r)
        goto error_pasid;

    fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
    if (!fpriv->prt_va) {
        r = -ENOMEM;
        goto error_vm;
    }

    if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
        uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

        r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
                                  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
        if (r)
            goto error_vm;
    }

    mutex_init(&fpriv->bo_list_lock);
    idr_init(&fpriv->bo_list_handles);

    amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

    file_priv->driver_priv = fpriv;
    goto out_suspend;

error_vm:
    amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
    if (pasid)
        amdgpu_pasid_free(pasid);

    kfree(fpriv);

out_suspend:
    pm_runtime_mark_last_busy(dev->dev);
    pm_runtime_put_autosuspend(dev->dev);

    return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
                                 struct drm_file *file_priv)
{
    struct amdgpu_device *adev = dev->dev_private;
    struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
    struct amdgpu_bo_list *list;
    struct amdgpu_bo *pd;
    unsigned int pasid;
    int handle;

    if (!fpriv)
        return;

    pm_runtime_get_sync(dev->dev);

    if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
        amdgpu_uvd_free_handles(adev, file_priv);
    if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
        amdgpu_vce_free_handles(adev, file_priv);

    amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

    if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
        /* TODO: how to handle reserve failure */
        BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
        amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
        fpriv->csa_va = NULL;
        amdgpu_bo_unreserve(adev->virt.csa_obj);
    }

    pasid = fpriv->vm.pasid;
    pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

    amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
    amdgpu_vm_fini(adev, &fpriv->vm);

    if (pasid)
        amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
    amdgpu_bo_unref(&pd);

    idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
        amdgpu_bo_list_put(list);

    idr_destroy(&fpriv->bo_list_handles);
    mutex_destroy(&fpriv->bo_list_lock);

    kfree(fpriv);
    file_priv->driver_priv = NULL;

    pm_runtime_mark_last_busy(dev->dev);
    pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
    struct amdgpu_device *adev = dev->dev_private;
    int vpos, hpos, stat;
    u32 count;

    if (pipe >= adev->mode_info.num_crtc) {
        DRM_ERROR("Invalid crtc %u\n", pipe);
        return -EINVAL;
    }

    /* The hw increments its frame counter at start of vsync, not at start
     * of vblank, as is required by DRM core vblank counter handling.
     * Cook the hw count here to make it appear to the caller as if it
     * incremented at start of vblank. We measure distance to start of
     * vblank in vpos. vpos therefore will be >= 0 between start of vblank
     * and start of vsync, so vpos >= 0 means to bump the hw frame counter
     * result by 1 to give the proper appearance to caller.
     */
    if (adev->mode_info.crtcs[pipe]) {
        /* Repeat readout if needed to provide stable result if
         * we cross start of vsync during the queries.
         */
        do {
            count = amdgpu_display_vblank_get_counter(adev, pipe);
            /* Ask amdgpu_display_get_crtc_scanoutpos to return
             * vpos as distance to start of vblank, instead of
             * regular vertical scanout pos.
             */
            stat = amdgpu_display_get_crtc_scanoutpos(
                dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
                &vpos, &hpos, NULL, NULL,
                &adev->mode_info.crtcs[pipe]->base.hwmode);
        } while (count != amdgpu_display_vblank_get_counter(adev, pipe));

        if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
            (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
            DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
        } else {
            DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
                          pipe, vpos);

            /* Bump counter if we are at >= leading edge of vblank,
             * but before vsync where vpos would turn negative and
             * the hw counter really increments.
             */
            if (vpos >= 0)
                count++;
        }
    } else {
        /* Fallback to use value as is. */
        count = amdgpu_display_vblank_get_counter(adev, pipe);
        DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
    }

    return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
    struct amdgpu_device *adev = dev->dev_private;
    int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

    return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
    struct amdgpu_device *adev = dev->dev_private;
    int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

    amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
    DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
    DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    /* KMS */
    DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

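/*
 * Dumps the same firmware version data as the AMDGPU_INFO_FW_VERSION query,
 * typically exposed as <debugfs>/dri/<minor>/amdgpu_firmware_info.
 */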
static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_device *dev = node->minor->dev;
    struct amdgpu_device *adev = dev->dev_private;
    struct drm_amdgpu_info_firmware fw_info;
    struct drm_amdgpu_query_fw query_fw;
    struct atom_context *ctx = adev->mode_info.atom_context;
    int ret, i;

    /* VCE */
    query_fw.fw_type = AMDGPU_INFO_FW_VCE;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    /* UVD */
    query_fw.fw_type = AMDGPU_INFO_FW_UVD;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    /* GMC */
    query_fw.fw_type = AMDGPU_INFO_FW_GMC;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    /* ME */
    query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    /* PFP */
    query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    /* CE */
    query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    /* RLC */
    query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    /* RLC SAVE RESTORE LIST CNTL */
    query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    /* RLC SAVE RESTORE LIST GPM MEM */
    query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    /* RLC SAVE RESTORE LIST SRM MEM */
    query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    /* MEC */
    query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
    query_fw.index = 0;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);
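    /*
     * A second MEC is only present on KAVERI and on post-TOPAZ parts other
     * than STONEY, hence the asic_type check below.
     */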
    /* MEC2 */
    if (adev->asic_type == CHIP_KAVERI ||
        (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
        query_fw.index = 1;
        ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
        if (ret)
            return ret;
        seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
                   fw_info.feature, fw_info.ver);
    }

    /* PSP SOS */
    query_fw.fw_type = AMDGPU_INFO_FW_SOS;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);


    /* PSP ASD */
    query_fw.fw_type = AMDGPU_INFO_FW_ASD;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    query_fw.fw_type = AMDGPU_INFO_FW_TA;
    for (i = 0; i < 2; i++) {
        query_fw.index = i;
        ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
        if (ret)
            continue;
        seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
                   i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
    }

    /* SMC */
    query_fw.fw_type = AMDGPU_INFO_FW_SMC;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    /* SDMA */
    query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
    for (i = 0; i < adev->sdma.num_instances; i++) {
        query_fw.index = i;
        ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
        if (ret)
            return ret;
        seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
                   i, fw_info.feature, fw_info.ver);
    }

    /* VCN */
    query_fw.fw_type = AMDGPU_INFO_FW_VCN;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);

    /* DMCU */
    query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
    if (ret)
        return ret;
    seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
               fw_info.feature, fw_info.ver);


    seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

    return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
    {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
    return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
                                    ARRAY_SIZE(amdgpu_firmware_info_list));
#else
    return 0;
#endif
}