/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			/* Keep the array dense: move the last entry into the
			 * freed slot instead of shifting everything down.
			 */
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}
/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		return;

	if (adev->runpm) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);
	amdgpu_device_fini_hw(adev);
}

void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more GPU instances\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}
/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @adev: pointer to struct amdgpu_device
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
	struct drm_device *dev;
	struct pci_dev *parent;
	int r, acpi_status;

	dev = adev_to_drm(adev);

	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
		flags |= AMD_IS_PX;

	parent = pci_upstream_bridge(adev->pdev);
	adev->has_pr3 = parent ? pci_pr3_present(parent) : false;

	/* amdgpu_device_init() should report only fatal errors, such as
	 * memory allocation, iomapping or memory manager initialization
	 * failures; it must properly initialize the GPU MC controller and
	 * permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, flags);
	if (r) {
		dev_err(dev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	if (amdgpu_device_supports_px(dev) &&
	    (amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */
		adev->runpm = true;
		dev_info(adev->dev, "Using ATPX for runtime pm\n");
	} else if (amdgpu_device_supports_boco(dev) &&
		   (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
		adev->runpm = true;
		dev_info(adev->dev, "Using BOCO for runtime pm\n");
	} else if (amdgpu_device_supports_baco(dev) &&
		   (amdgpu_runtime_pm != 0)) {
		switch (adev->asic_type) {
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
			/* enable runpm only if runpm=1 was requested explicitly */
			if (amdgpu_runtime_pm > 0)
				adev->runpm = true;
			break;
		case CHIP_VEGA10:
			/* turn runpm on if noretry=0 */
			if (!adev->gmc.noretry)
				adev->runpm = true;
			break;
		default:
			/* enable runpm on CI+ */
			adev->runpm = true;
			break;
		}
		if (adev->runpm)
			dev_info(adev->dev, "Using BACO for runtime pm\n");
	}

	/* Call ACPI methods: they require modeset init,
	 * but failure is not fatal.
	 */
	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(dev->dev, "Error during ACPI methods call\n");

	if (adev->runpm) {
		/* only need to skip on ATPX */
		if (amdgpu_device_supports_px(dev))
			dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
		/* we want direct complete for BOCO */
		if (amdgpu_device_supports_boco(dev))
			dev_pm_set_driver_flags(dev->dev, DPM_FLAG_SMART_PREPARE |
						DPM_FLAG_SMART_SUSPEND |
						DPM_FLAG_MAY_SKIP_RESUME);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && adev->runpm)
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}
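/*
 * Editor's summary of the runtime PM policy above, derived purely from the
 * code (not authoritative documentation): runpm stays disabled whenever the
 * amdgpu_runtime_pm module parameter is 0.  With any non-zero value, ATPX
 * and BOCO platforms enable it by default, while BACO-capable parts apply a
 * per-ASIC policy: Vega20/Arcturus require an explicit opt-in
 * (amdgpu_runtime_pm > 0) and Vega10 additionally requires noretry=0.
 */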
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		switch (query_fw->index) {
		case TA_FW_TYPE_PSP_XGMI:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
			break;
		case TA_FW_TYPE_PSP_RAS:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_ras_ucode_version;
			break;
		case TA_FW_TYPE_PSP_HDCP:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_hdcp_ucode_version;
			break;
		case TA_FW_TYPE_PSP_DTM:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_dtm_ucode_version;
			break;
		case TA_FW_TYPE_PSP_RAP:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_rap_ucode_version;
			break;
		case TA_FW_TYPE_PSP_SECUREDISPLAY:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_securedisplay_ucode_version;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_DMCUB:
		fw_info->ver = adev->dm.dmcub_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TOC:
		fw_info->ver = adev->psp.toc_fw_version;
		fw_info->feature = adev->psp.toc_feature_version;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
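/*
 * Illustrative sketch (editor's addition, not part of the driver):
 * amdgpu_firmware_info() is reached from userspace through
 * DRM_IOCTL_AMDGPU_INFO.  Assuming an already-open render-node fd, querying
 * the GFX ME microcode version looks roughly like this:
 *
 *	struct drm_amdgpu_info_firmware fw = {};
 *	struct drm_amdgpu_info request = {};
 *
 *	request.return_pointer = (uintptr_t)&fw;
 *	request.return_size = sizeof(fw);
 *	request.query = AMDGPU_INFO_FW_VERSION;
 *	request.query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
 *	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) == 0)
 *		printf("ME fw 0x%08x, feature %u\n", fw.ver, fw.feature);
 */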
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			/* VCN instances are harvested via the VCN mask; the
			 * original check against adev->uvd.harvest_config
			 * looked like a copy-paste slip.
			 */
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1 << i))
				continue;

			if (adev->jpeg.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}
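/*
 * Editor's note on the result layout above: available_rings is a dense
 * bitmask, not a count.  With e.g. three ready rings the field reads
 * (1 << 3) - 1 = 0x7, so userspace tests ring i via
 * (available_rings >> i) & 1.
 */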
/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		vram_gtt.vram_cpu_accessible_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    vram_gtt.vram_size);
		vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;
		struct ttm_resource_manager *vram_man =
			ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
		struct ttm_resource_manager *gtt_man =
			ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(vram_man);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    mem.vram.usable_heap_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(vram_man);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = gtt_man->size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(gtt_man);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if userspace set all bits
		 * in the bitfields
		 */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		else if (se_num >= AMDGPU_GFX_MAX_SE)
			return -EINVAL;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;
		else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
			return -EINVAL;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		amdgpu_gfx_off_ctrl(adev, false);
		for (i = 0; i < info->read_mmr_reg.count; i++) {
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				amdgpu_gfx_off_ctrl(adev, true);
				return -EFAULT;
			}
		}
		amdgpu_gfx_off_ctrl(adev, true);
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
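	/*
	 * Editor's sketch of the selector format consumed above (assuming
	 * the standard amdgpu_drm.h bitfield macros): userspace packs the
	 * SE/SH choice for AMDGPU_INFO_READ_MMR_REG as
	 *
	 *	instance = (se << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
	 *		   (sh << AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
	 *
	 * and writes all-ones into a field (the full *_INDEX_MASK value) to
	 * request broadcast across that dimension, which the code turns
	 * into 0xffffffff for amdgpu_asic_read_register().
	 */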
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device *dev_info;
		uint64_t vm_size;
		int ret;

		dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
		if (!dev_info)
			return -ENOMEM;

		dev_info->device_id = adev->pdev->device;
		dev_info->chip_rev = adev->rev_id;
		dev_info->external_rev = adev->external_rev_id;
		dev_info->pci_rev = adev->pdev->revision;
		dev_info->family = adev->family;
		dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info->gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info->max_engine_clock = adev->clock.default_sclk * 10;
			dev_info->max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info->_pad = 0;
		dev_info->ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
		if (amdgpu_is_tmz(adev))
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40 bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info->virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info->cu_active_number = adev->gfx.cu_info.number;
		dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info->ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info->vram_type = adev->gmc.vram_type;
		dev_info->vram_bit_width = adev->gmc.vram_width;
		dev_info->vce_harvest_config = adev->vce.harvest_config;
		dev_info->gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;
		dev_info->wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info->num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info->num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info->gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info->gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info->max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info->pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		ret = copy_to_user(out, dev_info,
				   min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
		kfree(dev_info);
		return ret;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size))) ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset))) ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting with Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
						    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)adev->ras_enabled << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				    min_t(u64, size, sizeof(ras_mask))) ? -EFAULT : 0;
	}
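	/*
	 * Editor's decoding note for the 64-bit value returned above: the
	 * per-block enablement mask sits in the high word and the feature
	 * flags in the low word, so userspace recovers them as
	 *
	 *	enabled_blocks = ras_mask >> 32;
	 *	features = (uint32_t)ras_mask;
	 */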
	case AMDGPU_INFO_VIDEO_CAPS: {
		const struct amdgpu_video_codecs *codecs;
		struct drm_amdgpu_info_video_caps *caps;
		int r;

		switch (info->video_cap.type) {
		case AMDGPU_INFO_VIDEO_CAPS_DECODE:
			r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
			if (r)
				return -EINVAL;
			break;
		case AMDGPU_INFO_VIDEO_CAPS_ENCODE:
			r = amdgpu_asic_query_video_codecs(adev, true, &codecs);
			if (r)
				return -EINVAL;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->video_cap.type);
			return -EINVAL;
		}

		caps = kzalloc(sizeof(*caps), GFP_KERNEL);
		if (!caps)
			return -ENOMEM;

		for (i = 0; i < codecs->codec_count; i++) {
			int idx = codecs->codec_array[i].codec_type;

			switch (idx) {
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1:
				caps->codec_info[idx].valid = 1;
				caps->codec_info[idx].max_width =
					codecs->codec_array[i].max_width;
				caps->codec_info[idx].max_height =
					codecs->codec_array[i].max_height;
				caps->codec_info[idx].max_pixels_per_frame =
					codecs->codec_array[i].max_pixels_per_frame;
				caps->codec_info[idx].max_level =
					codecs->codec_array[i].max_level;
				break;
			default:
				break;
			}
		}
		r = copy_to_user(out, caps,
				 min((size_t)size, sizeof(*caps))) ? -EFAULT : 0;
		kfree(caps);
		return r;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		goto pm_put;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	r = amdgpu_vm_init(adev, &fpriv->vm, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
pm_put:
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	u32 pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

void amdgpu_driver_release_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	amdgpu_device_fini_sw(adev);
	pci_set_drvdata(adev->pdev, NULL);
}
1274 */ 1275 stat = amdgpu_display_get_crtc_scanoutpos( 1276 dev, pipe, GET_DISTANCE_TO_VBLANKSTART, 1277 &vpos, &hpos, NULL, NULL, 1278 &adev->mode_info.crtcs[pipe]->base.hwmode); 1279 } while (count != amdgpu_display_vblank_get_counter(adev, pipe)); 1280 1281 if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != 1282 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) { 1283 DRM_DEBUG_VBL("Query failed! stat %d\n", stat); 1284 } else { 1285 DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n", 1286 pipe, vpos); 1287 1288 /* Bump counter if we are at >= leading edge of vblank, 1289 * but before vsync where vpos would turn negative and 1290 * the hw counter really increments. 1291 */ 1292 if (vpos >= 0) 1293 count++; 1294 } 1295 } else { 1296 /* Fallback to use value as is. */ 1297 count = amdgpu_display_vblank_get_counter(adev, pipe); 1298 DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n"); 1299 } 1300 1301 return count; 1302 } 1303 1304 /** 1305 * amdgpu_enable_vblank_kms - enable vblank interrupt 1306 * 1307 * @crtc: crtc to enable vblank interrupt for 1308 * 1309 * Enable the interrupt on the requested crtc (all asics). 1310 * Returns 0 on success, -EINVAL on failure. 1311 */ 1312 int amdgpu_enable_vblank_kms(struct drm_crtc *crtc) 1313 { 1314 struct drm_device *dev = crtc->dev; 1315 unsigned int pipe = crtc->index; 1316 struct amdgpu_device *adev = drm_to_adev(dev); 1317 int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe); 1318 1319 return amdgpu_irq_get(adev, &adev->crtc_irq, idx); 1320 } 1321 1322 /** 1323 * amdgpu_disable_vblank_kms - disable vblank interrupt 1324 * 1325 * @crtc: crtc to disable vblank interrupt for 1326 * 1327 * Disable the interrupt on the requested crtc (all asics). 1328 */ 1329 void amdgpu_disable_vblank_kms(struct drm_crtc *crtc) 1330 { 1331 struct drm_device *dev = crtc->dev; 1332 unsigned int pipe = crtc->index; 1333 struct amdgpu_device *adev = drm_to_adev(dev); 1334 int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe); 1335 1336 amdgpu_irq_put(adev, &adev->crtc_irq, idx); 1337 } 1338 1339 /* 1340 * Debugfs info 1341 */ 1342 #if defined(CONFIG_DEBUG_FS) 1343 1344 static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) 1345 { 1346 struct amdgpu_device *adev = (struct amdgpu_device *)m->private; 1347 struct drm_amdgpu_info_firmware fw_info; 1348 struct drm_amdgpu_query_fw query_fw; 1349 struct atom_context *ctx = adev->mode_info.atom_context; 1350 int ret, i; 1351 1352 static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = { 1353 #define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type 1354 TA_FW_NAME(XGMI), 1355 TA_FW_NAME(RAS), 1356 TA_FW_NAME(HDCP), 1357 TA_FW_NAME(DTM), 1358 TA_FW_NAME(RAP), 1359 TA_FW_NAME(SECUREDISPLAY), 1360 #undef TA_FW_NAME 1361 }; 1362 1363 /* VCE */ 1364 query_fw.fw_type = AMDGPU_INFO_FW_VCE; 1365 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); 1366 if (ret) 1367 return ret; 1368 seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n", 1369 fw_info.feature, fw_info.ver); 1370 1371 /* UVD */ 1372 query_fw.fw_type = AMDGPU_INFO_FW_UVD; 1373 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); 1374 if (ret) 1375 return ret; 1376 seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n", 1377 fw_info.feature, fw_info.ver); 1378 1379 /* GMC */ 1380 query_fw.fw_type = AMDGPU_INFO_FW_GMC; 1381 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); 1382 if (ret) 1383 return ret; 1384 seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n", 
/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type
		TA_FW_NAME(XGMI),
		TA_FW_NAME(RAS),
		TA_FW_NAME(HDCP),
		TA_FW_NAME(DTM),
		TA_FW_NAME(RAP),
		TA_FW_NAME(SECUREDISPLAY),
#undef TA_FW_NAME
	};

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->gfx.mec2_fw) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = TA_FW_TYPE_PSP_XGMI; i < TA_FW_TYPE_MAX_INDEX; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;

		seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
			   ta_fw_name[i], fw_info.feature, fw_info.ver);
	}
	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCUB */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* TOC */
	query_fw.fw_type = AMDGPU_INFO_FW_TOC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_firmware_info);

#endif

void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_firmware_info", 0444, root,
			    adev, &amdgpu_debugfs_firmware_info_fops);
#endif
}
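/*
 * Editor's usage note: with CONFIG_DEBUG_FS enabled, the file created above
 * is typically readable under the DRM debugfs directory of the primary
 * minor, e.g. (path assumes the default debugfs mount point):
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_firmware_info
 */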