1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 */ 25 26 #include <linux/kthread.h> 27 #include <linux/pci.h> 28 #include <linux/uaccess.h> 29 #include <linux/pm_runtime.h> 30 31 #include "amdgpu.h" 32 #include "amdgpu_pm.h" 33 #include "amdgpu_dm_debugfs.h" 34 #include "amdgpu_ras.h" 35 #include "amdgpu_rap.h" 36 #include "amdgpu_securedisplay.h" 37 #include "amdgpu_fw_attestation.h" 38 #include "amdgpu_umr.h" 39 40 #include "amdgpu_reset.h" 41 #include "amdgpu_psp_ta.h" 42 43 #if defined(CONFIG_DEBUG_FS) 44 45 /** 46 * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes 47 * 48 * @read: True if reading 49 * @f: open file handle 50 * @buf: User buffer to write/read to 51 * @size: Number of bytes to write/read 52 * @pos: Offset to seek to 53 * 54 * This debugfs entry has special meaning on the offset being sought. 55 * Various bits have different meanings: 56 * 57 * Bit 62: Indicates a GRBM bank switch is needed 58 * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is 59 * zero) 60 * Bits 24..33: The SE or ME selector if needed 61 * Bits 34..43: The SH (or SA) or PIPE selector if needed 62 * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed 63 * 64 * Bit 23: Indicates that the PM power gating lock should be held 65 * This is necessary to read registers that might be 66 * unreliable during a power gating transistion. 67 * 68 * The lower bits are the BYTE offset of the register to read. This 69 * allows reading multiple registers in a single call and having 70 * the returned size reflect that. 71 */ 72 static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, 73 char __user *buf, size_t size, loff_t *pos) 74 { 75 struct amdgpu_device *adev = file_inode(f)->i_private; 76 ssize_t result = 0; 77 int r; 78 bool pm_pg_lock, use_bank, use_ring; 79 unsigned int instance_bank, sh_bank, se_bank, me, pipe, queue, vmid; 80 81 pm_pg_lock = use_bank = use_ring = false; 82 instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0; 83 84 if (size & 0x3 || *pos & 0x3 || 85 ((*pos & (1ULL << 62)) && (*pos & (1ULL << 61)))) 86 return -EINVAL; 87 88 /* are we reading registers for which a PG lock is necessary? */ 89 pm_pg_lock = (*pos >> 23) & 1; 90 91 if (*pos & (1ULL << 62)) { 92 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24; 93 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34; 94 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44; 95 96 if (se_bank == 0x3FF) 97 se_bank = 0xFFFFFFFF; 98 if (sh_bank == 0x3FF) 99 sh_bank = 0xFFFFFFFF; 100 if (instance_bank == 0x3FF) 101 instance_bank = 0xFFFFFFFF; 102 use_bank = true; 103 } else if (*pos & (1ULL << 61)) { 104 105 me = (*pos & GENMASK_ULL(33, 24)) >> 24; 106 pipe = (*pos & GENMASK_ULL(43, 34)) >> 34; 107 queue = (*pos & GENMASK_ULL(53, 44)) >> 44; 108 vmid = (*pos & GENMASK_ULL(58, 54)) >> 54; 109 110 use_ring = true; 111 } else { 112 use_bank = use_ring = false; 113 } 114 115 *pos &= (1UL << 22) - 1; 116 117 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 118 if (r < 0) { 119 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 120 return r; 121 } 122 123 r = amdgpu_virt_enable_access_debugfs(adev); 124 if (r < 0) { 125 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 126 return r; 127 } 128 129 if (use_bank) { 130 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || 131 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) { 132 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 133 amdgpu_virt_disable_access_debugfs(adev); 134 return -EINVAL; 135 } 136 mutex_lock(&adev->grbm_idx_mutex); 137 amdgpu_gfx_select_se_sh(adev, se_bank, 138 sh_bank, instance_bank, 0); 139 } else if (use_ring) { 140 mutex_lock(&adev->srbm_mutex); 141 amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid, 0); 142 } 143 144 if (pm_pg_lock) 145 mutex_lock(&adev->pm.mutex); 146 147 while (size) { 148 uint32_t value; 149 150 if (read) { 151 value = RREG32(*pos >> 2); 152 r = put_user(value, (uint32_t *)buf); 153 } else { 154 r = get_user(value, (uint32_t *)buf); 155 if (!r) 156 amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0); 157 } 158 if (r) { 159 result = r; 160 goto end; 161 } 162 163 result += 4; 164 buf += 4; 165 *pos += 4; 166 size -= 4; 167 } 168 169 end: 170 if (use_bank) { 171 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 172 mutex_unlock(&adev->grbm_idx_mutex); 173 } else if (use_ring) { 174 amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0); 175 mutex_unlock(&adev->srbm_mutex); 176 } 177 178 if (pm_pg_lock) 179 mutex_unlock(&adev->pm.mutex); 180 181 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 182 183 amdgpu_virt_disable_access_debugfs(adev); 184 return result; 185 } 186 187 /* 188 * amdgpu_debugfs_regs_read - Callback for reading MMIO registers 189 */ 190 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, 191 size_t size, loff_t *pos) 192 { 193 return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos); 194 } 195 196 /* 197 * amdgpu_debugfs_regs_write - Callback for writing MMIO registers 198 */ 199 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, 200 size_t size, loff_t *pos) 201 { 202 return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos); 203 } 204 205 static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file) 206 { 207 struct amdgpu_debugfs_regs2_data *rd; 208 209 rd = kzalloc(sizeof(*rd), GFP_KERNEL); 210 if (!rd) 211 return -ENOMEM; 212 rd->adev = file_inode(file)->i_private; 213 file->private_data = rd; 214 mutex_init(&rd->lock); 215 216 return 0; 217 } 218 219 static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file) 220 { 221 struct amdgpu_debugfs_regs2_data *rd = file->private_data; 222 223 mutex_destroy(&rd->lock); 224 kfree(file->private_data); 225 return 0; 226 } 227 228 static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en) 229 { 230 struct amdgpu_debugfs_regs2_data *rd = f->private_data; 231 struct amdgpu_device *adev = rd->adev; 232 ssize_t result = 0; 233 int r; 234 uint32_t value; 235 236 if (size & 0x3 || offset & 0x3) 237 return -EINVAL; 238 239 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 240 if (r < 0) { 241 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 242 return r; 243 } 244 245 r = amdgpu_virt_enable_access_debugfs(adev); 246 if (r < 0) { 247 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 248 return r; 249 } 250 251 mutex_lock(&rd->lock); 252 253 if (rd->id.use_grbm) { 254 if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) || 255 (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) { 256 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 257 amdgpu_virt_disable_access_debugfs(adev); 258 mutex_unlock(&rd->lock); 259 return -EINVAL; 260 } 261 mutex_lock(&adev->grbm_idx_mutex); 262 amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se, 263 rd->id.grbm.sh, 264 rd->id.grbm.instance, rd->id.xcc_id); 265 } 266 267 if (rd->id.use_srbm) { 268 mutex_lock(&adev->srbm_mutex); 269 amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe, 270 rd->id.srbm.queue, rd->id.srbm.vmid, rd->id.xcc_id); 271 } 272 273 if (rd->id.pg_lock) 274 mutex_lock(&adev->pm.mutex); 275 276 while (size) { 277 if (!write_en) { 278 value = RREG32(offset >> 2); 279 r = put_user(value, (uint32_t *)buf); 280 } else { 281 r = get_user(value, (uint32_t *)buf); 282 if (!r) 283 amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value, rd->id.xcc_id); 284 } 285 if (r) { 286 result = r; 287 goto end; 288 } 289 offset += 4; 290 size -= 4; 291 result += 4; 292 buf += 4; 293 } 294 end: 295 if (rd->id.use_grbm) { 296 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, rd->id.xcc_id); 297 mutex_unlock(&adev->grbm_idx_mutex); 298 } 299 300 if (rd->id.use_srbm) { 301 amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, rd->id.xcc_id); 302 mutex_unlock(&adev->srbm_mutex); 303 } 304 305 if (rd->id.pg_lock) 306 mutex_unlock(&adev->pm.mutex); 307 308 mutex_unlock(&rd->lock); 309 310 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 311 312 amdgpu_virt_disable_access_debugfs(adev); 313 return result; 314 } 315 316 static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data) 317 { 318 struct amdgpu_debugfs_regs2_data *rd = f->private_data; 319 struct amdgpu_debugfs_regs2_iocdata v1_data; 320 int r; 321 322 mutex_lock(&rd->lock); 323 324 switch (cmd) { 325 case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2: 326 r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata_v2 *)data, 327 sizeof(rd->id)); 328 if (r) 329 r = -EINVAL; 330 goto done; 331 case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE: 332 r = copy_from_user(&v1_data, (struct amdgpu_debugfs_regs2_iocdata *)data, 333 sizeof(v1_data)); 334 if (r) { 335 r = -EINVAL; 336 goto done; 337 } 338 goto v1_copy; 339 default: 340 r = -EINVAL; 341 goto done; 342 } 343 344 v1_copy: 345 rd->id.use_srbm = v1_data.use_srbm; 346 rd->id.use_grbm = v1_data.use_grbm; 347 rd->id.pg_lock = v1_data.pg_lock; 348 rd->id.grbm.se = v1_data.grbm.se; 349 rd->id.grbm.sh = v1_data.grbm.sh; 350 rd->id.grbm.instance = v1_data.grbm.instance; 351 rd->id.srbm.me = v1_data.srbm.me; 352 rd->id.srbm.pipe = v1_data.srbm.pipe; 353 rd->id.srbm.queue = v1_data.srbm.queue; 354 rd->id.xcc_id = 0; 355 done: 356 mutex_unlock(&rd->lock); 357 return r; 358 } 359 360 static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos) 361 { 362 return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0); 363 } 364 365 static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) 366 { 367 return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1); 368 } 369 370 static int amdgpu_debugfs_gprwave_open(struct inode *inode, struct file *file) 371 { 372 struct amdgpu_debugfs_gprwave_data *rd; 373 374 rd = kzalloc(sizeof(*rd), GFP_KERNEL); 375 if (!rd) 376 return -ENOMEM; 377 rd->adev = file_inode(file)->i_private; 378 file->private_data = rd; 379 mutex_init(&rd->lock); 380 381 return 0; 382 } 383 384 static int amdgpu_debugfs_gprwave_release(struct inode *inode, struct file *file) 385 { 386 struct amdgpu_debugfs_gprwave_data *rd = file->private_data; 387 388 mutex_destroy(&rd->lock); 389 kfree(file->private_data); 390 return 0; 391 } 392 393 static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, size_t size, loff_t *pos) 394 { 395 struct amdgpu_debugfs_gprwave_data *rd = f->private_data; 396 struct amdgpu_device *adev = rd->adev; 397 ssize_t result = 0; 398 int r; 399 uint32_t *data, x; 400 401 if (size > 4096 || size & 0x3 || *pos & 0x3) 402 return -EINVAL; 403 404 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 405 if (r < 0) { 406 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 407 return r; 408 } 409 410 r = amdgpu_virt_enable_access_debugfs(adev); 411 if (r < 0) { 412 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 413 return r; 414 } 415 416 data = kcalloc(1024, sizeof(*data), GFP_KERNEL); 417 if (!data) { 418 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 419 amdgpu_virt_disable_access_debugfs(adev); 420 return -ENOMEM; 421 } 422 423 /* switch to the specific se/sh/cu */ 424 mutex_lock(&adev->grbm_idx_mutex); 425 amdgpu_gfx_select_se_sh(adev, rd->id.se, rd->id.sh, rd->id.cu, rd->id.xcc_id); 426 427 if (!rd->id.gpr_or_wave) { 428 x = 0; 429 if (adev->gfx.funcs->read_wave_data) 430 adev->gfx.funcs->read_wave_data(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, data, &x); 431 } else { 432 x = size >> 2; 433 if (rd->id.gpr.vpgr_or_sgpr) { 434 if (adev->gfx.funcs->read_wave_vgprs) 435 adev->gfx.funcs->read_wave_vgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, rd->id.gpr.thread, *pos, size>>2, data); 436 } else { 437 if (adev->gfx.funcs->read_wave_sgprs) 438 adev->gfx.funcs->read_wave_sgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, *pos, size>>2, data); 439 } 440 } 441 442 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id); 443 mutex_unlock(&adev->grbm_idx_mutex); 444 445 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 446 447 if (!x) { 448 result = -EINVAL; 449 goto done; 450 } 451 452 while (size && (*pos < x * 4)) { 453 uint32_t value; 454 455 value = data[*pos >> 2]; 456 r = put_user(value, (uint32_t *)buf); 457 if (r) { 458 result = r; 459 goto done; 460 } 461 462 result += 4; 463 buf += 4; 464 *pos += 4; 465 size -= 4; 466 } 467 468 done: 469 amdgpu_virt_disable_access_debugfs(adev); 470 kfree(data); 471 return result; 472 } 473 474 static long amdgpu_debugfs_gprwave_ioctl(struct file *f, unsigned int cmd, unsigned long data) 475 { 476 struct amdgpu_debugfs_gprwave_data *rd = f->private_data; 477 int r = 0; 478 479 mutex_lock(&rd->lock); 480 481 switch (cmd) { 482 case AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE: 483 if (copy_from_user(&rd->id, 484 (struct amdgpu_debugfs_gprwave_iocdata *)data, 485 sizeof(rd->id))) 486 r = -EFAULT; 487 goto done; 488 default: 489 r = -EINVAL; 490 goto done; 491 } 492 493 done: 494 mutex_unlock(&rd->lock); 495 return r; 496 } 497 498 499 500 501 /** 502 * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register 503 * 504 * @f: open file handle 505 * @buf: User buffer to store read data in 506 * @size: Number of bytes to read 507 * @pos: Offset to seek to 508 * 509 * The lower bits are the BYTE offset of the register to read. This 510 * allows reading multiple registers in a single call and having 511 * the returned size reflect that. 512 */ 513 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, 514 size_t size, loff_t *pos) 515 { 516 struct amdgpu_device *adev = file_inode(f)->i_private; 517 ssize_t result = 0; 518 int r; 519 520 if (size & 0x3 || *pos & 0x3) 521 return -EINVAL; 522 523 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 524 if (r < 0) { 525 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 526 return r; 527 } 528 529 r = amdgpu_virt_enable_access_debugfs(adev); 530 if (r < 0) { 531 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 532 return r; 533 } 534 535 while (size) { 536 uint32_t value; 537 538 if (upper_32_bits(*pos)) 539 value = RREG32_PCIE_EXT(*pos); 540 else 541 value = RREG32_PCIE(*pos); 542 543 r = put_user(value, (uint32_t *)buf); 544 if (r) 545 goto out; 546 547 result += 4; 548 buf += 4; 549 *pos += 4; 550 size -= 4; 551 } 552 553 r = result; 554 out: 555 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 556 amdgpu_virt_disable_access_debugfs(adev); 557 return r; 558 } 559 560 /** 561 * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register 562 * 563 * @f: open file handle 564 * @buf: User buffer to write data from 565 * @size: Number of bytes to write 566 * @pos: Offset to seek to 567 * 568 * The lower bits are the BYTE offset of the register to write. This 569 * allows writing multiple registers in a single call and having 570 * the returned size reflect that. 571 */ 572 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf, 573 size_t size, loff_t *pos) 574 { 575 struct amdgpu_device *adev = file_inode(f)->i_private; 576 ssize_t result = 0; 577 int r; 578 579 if (size & 0x3 || *pos & 0x3) 580 return -EINVAL; 581 582 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 583 if (r < 0) { 584 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 585 return r; 586 } 587 588 r = amdgpu_virt_enable_access_debugfs(adev); 589 if (r < 0) { 590 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 591 return r; 592 } 593 594 while (size) { 595 uint32_t value; 596 597 r = get_user(value, (uint32_t *)buf); 598 if (r) 599 goto out; 600 601 if (upper_32_bits(*pos)) 602 WREG32_PCIE_EXT(*pos, value); 603 else 604 WREG32_PCIE(*pos, value); 605 606 result += 4; 607 buf += 4; 608 *pos += 4; 609 size -= 4; 610 } 611 612 r = result; 613 out: 614 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 615 amdgpu_virt_disable_access_debugfs(adev); 616 return r; 617 } 618 619 /** 620 * amdgpu_debugfs_regs_didt_read - Read from a DIDT register 621 * 622 * @f: open file handle 623 * @buf: User buffer to store read data in 624 * @size: Number of bytes to read 625 * @pos: Offset to seek to 626 * 627 * The lower bits are the BYTE offset of the register to read. This 628 * allows reading multiple registers in a single call and having 629 * the returned size reflect that. 630 */ 631 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, 632 size_t size, loff_t *pos) 633 { 634 struct amdgpu_device *adev = file_inode(f)->i_private; 635 ssize_t result = 0; 636 int r; 637 638 if (size & 0x3 || *pos & 0x3) 639 return -EINVAL; 640 641 if (!adev->didt_rreg) 642 return -EOPNOTSUPP; 643 644 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 645 if (r < 0) { 646 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 647 return r; 648 } 649 650 r = amdgpu_virt_enable_access_debugfs(adev); 651 if (r < 0) { 652 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 653 return r; 654 } 655 656 while (size) { 657 uint32_t value; 658 659 value = RREG32_DIDT(*pos >> 2); 660 r = put_user(value, (uint32_t *)buf); 661 if (r) 662 goto out; 663 664 result += 4; 665 buf += 4; 666 *pos += 4; 667 size -= 4; 668 } 669 670 r = result; 671 out: 672 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 673 amdgpu_virt_disable_access_debugfs(adev); 674 return r; 675 } 676 677 /** 678 * amdgpu_debugfs_regs_didt_write - Write to a DIDT register 679 * 680 * @f: open file handle 681 * @buf: User buffer to write data from 682 * @size: Number of bytes to write 683 * @pos: Offset to seek to 684 * 685 * The lower bits are the BYTE offset of the register to write. This 686 * allows writing multiple registers in a single call and having 687 * the returned size reflect that. 688 */ 689 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf, 690 size_t size, loff_t *pos) 691 { 692 struct amdgpu_device *adev = file_inode(f)->i_private; 693 ssize_t result = 0; 694 int r; 695 696 if (size & 0x3 || *pos & 0x3) 697 return -EINVAL; 698 699 if (!adev->didt_wreg) 700 return -EOPNOTSUPP; 701 702 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 703 if (r < 0) { 704 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 705 return r; 706 } 707 708 r = amdgpu_virt_enable_access_debugfs(adev); 709 if (r < 0) { 710 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 711 return r; 712 } 713 714 while (size) { 715 uint32_t value; 716 717 r = get_user(value, (uint32_t *)buf); 718 if (r) 719 goto out; 720 721 WREG32_DIDT(*pos >> 2, value); 722 723 result += 4; 724 buf += 4; 725 *pos += 4; 726 size -= 4; 727 } 728 729 r = result; 730 out: 731 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 732 amdgpu_virt_disable_access_debugfs(adev); 733 return r; 734 } 735 736 /** 737 * amdgpu_debugfs_regs_smc_read - Read from a SMC register 738 * 739 * @f: open file handle 740 * @buf: User buffer to store read data in 741 * @size: Number of bytes to read 742 * @pos: Offset to seek to 743 * 744 * The lower bits are the BYTE offset of the register to read. This 745 * allows reading multiple registers in a single call and having 746 * the returned size reflect that. 747 */ 748 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, 749 size_t size, loff_t *pos) 750 { 751 struct amdgpu_device *adev = file_inode(f)->i_private; 752 ssize_t result = 0; 753 int r; 754 755 if (!adev->smc_rreg) 756 return -EOPNOTSUPP; 757 758 if (size & 0x3 || *pos & 0x3) 759 return -EINVAL; 760 761 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 762 if (r < 0) { 763 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 764 return r; 765 } 766 767 r = amdgpu_virt_enable_access_debugfs(adev); 768 if (r < 0) { 769 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 770 return r; 771 } 772 773 while (size) { 774 uint32_t value; 775 776 value = RREG32_SMC(*pos); 777 r = put_user(value, (uint32_t *)buf); 778 if (r) 779 goto out; 780 781 result += 4; 782 buf += 4; 783 *pos += 4; 784 size -= 4; 785 } 786 787 r = result; 788 out: 789 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 790 amdgpu_virt_disable_access_debugfs(adev); 791 return r; 792 } 793 794 /** 795 * amdgpu_debugfs_regs_smc_write - Write to a SMC register 796 * 797 * @f: open file handle 798 * @buf: User buffer to write data from 799 * @size: Number of bytes to write 800 * @pos: Offset to seek to 801 * 802 * The lower bits are the BYTE offset of the register to write. This 803 * allows writing multiple registers in a single call and having 804 * the returned size reflect that. 805 */ 806 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf, 807 size_t size, loff_t *pos) 808 { 809 struct amdgpu_device *adev = file_inode(f)->i_private; 810 ssize_t result = 0; 811 int r; 812 813 if (!adev->smc_wreg) 814 return -EOPNOTSUPP; 815 816 if (size & 0x3 || *pos & 0x3) 817 return -EINVAL; 818 819 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 820 if (r < 0) { 821 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 822 return r; 823 } 824 825 r = amdgpu_virt_enable_access_debugfs(adev); 826 if (r < 0) { 827 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 828 return r; 829 } 830 831 while (size) { 832 uint32_t value; 833 834 r = get_user(value, (uint32_t *)buf); 835 if (r) 836 goto out; 837 838 WREG32_SMC(*pos, value); 839 840 result += 4; 841 buf += 4; 842 *pos += 4; 843 size -= 4; 844 } 845 846 r = result; 847 out: 848 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 849 amdgpu_virt_disable_access_debugfs(adev); 850 return r; 851 } 852 853 /** 854 * amdgpu_debugfs_gca_config_read - Read from gfx config data 855 * 856 * @f: open file handle 857 * @buf: User buffer to store read data in 858 * @size: Number of bytes to read 859 * @pos: Offset to seek to 860 * 861 * This file is used to access configuration data in a somewhat 862 * stable fashion. The format is a series of DWORDs with the first 863 * indicating which revision it is. New content is appended to the 864 * end so that older software can still read the data. 865 */ 866 867 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, 868 size_t size, loff_t *pos) 869 { 870 struct amdgpu_device *adev = file_inode(f)->i_private; 871 ssize_t result = 0; 872 int r; 873 uint32_t *config, no_regs = 0; 874 875 if (size & 0x3 || *pos & 0x3) 876 return -EINVAL; 877 878 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL); 879 if (!config) 880 return -ENOMEM; 881 882 /* version, increment each time something is added */ 883 config[no_regs++] = 5; 884 config[no_regs++] = adev->gfx.config.max_shader_engines; 885 config[no_regs++] = adev->gfx.config.max_tile_pipes; 886 config[no_regs++] = adev->gfx.config.max_cu_per_sh; 887 config[no_regs++] = adev->gfx.config.max_sh_per_se; 888 config[no_regs++] = adev->gfx.config.max_backends_per_se; 889 config[no_regs++] = adev->gfx.config.max_texture_channel_caches; 890 config[no_regs++] = adev->gfx.config.max_gprs; 891 config[no_regs++] = adev->gfx.config.max_gs_threads; 892 config[no_regs++] = adev->gfx.config.max_hw_contexts; 893 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend; 894 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend; 895 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size; 896 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size; 897 config[no_regs++] = adev->gfx.config.num_tile_pipes; 898 config[no_regs++] = adev->gfx.config.backend_enable_mask; 899 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes; 900 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb; 901 config[no_regs++] = adev->gfx.config.shader_engine_tile_size; 902 config[no_regs++] = adev->gfx.config.num_gpus; 903 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size; 904 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg; 905 config[no_regs++] = adev->gfx.config.gb_addr_config; 906 config[no_regs++] = adev->gfx.config.num_rbs; 907 908 /* rev==1 */ 909 config[no_regs++] = adev->rev_id; 910 config[no_regs++] = adev->pg_flags; 911 config[no_regs++] = lower_32_bits(adev->cg_flags); 912 913 /* rev==2 */ 914 config[no_regs++] = adev->family; 915 config[no_regs++] = adev->external_rev_id; 916 917 /* rev==3 */ 918 config[no_regs++] = adev->pdev->device; 919 config[no_regs++] = adev->pdev->revision; 920 config[no_regs++] = adev->pdev->subsystem_device; 921 config[no_regs++] = adev->pdev->subsystem_vendor; 922 923 /* rev==4 APU flag */ 924 config[no_regs++] = adev->flags & AMD_IS_APU ? 1 : 0; 925 926 /* rev==5 PG/CG flag upper 32bit */ 927 config[no_regs++] = 0; 928 config[no_regs++] = upper_32_bits(adev->cg_flags); 929 930 while (size && (*pos < no_regs * 4)) { 931 uint32_t value; 932 933 value = config[*pos >> 2]; 934 r = put_user(value, (uint32_t *)buf); 935 if (r) { 936 kfree(config); 937 return r; 938 } 939 940 result += 4; 941 buf += 4; 942 *pos += 4; 943 size -= 4; 944 } 945 946 kfree(config); 947 return result; 948 } 949 950 /** 951 * amdgpu_debugfs_sensor_read - Read from the powerplay sensors 952 * 953 * @f: open file handle 954 * @buf: User buffer to store read data in 955 * @size: Number of bytes to read 956 * @pos: Offset to seek to 957 * 958 * The offset is treated as the BYTE address of one of the sensors 959 * enumerated in amd/include/kgd_pp_interface.h under the 960 * 'amd_pp_sensors' enumeration. For instance to read the UVD VCLK 961 * you would use the offset 3 * 4 = 12. 962 */ 963 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, 964 size_t size, loff_t *pos) 965 { 966 struct amdgpu_device *adev = file_inode(f)->i_private; 967 int idx, x, outsize, r, valuesize; 968 uint32_t values[16]; 969 970 if (size & 3 || *pos & 0x3) 971 return -EINVAL; 972 973 if (!adev->pm.dpm_enabled) 974 return -EINVAL; 975 976 /* convert offset to sensor number */ 977 idx = *pos >> 2; 978 979 valuesize = sizeof(values); 980 981 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 982 if (r < 0) { 983 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 984 return r; 985 } 986 987 r = amdgpu_virt_enable_access_debugfs(adev); 988 if (r < 0) { 989 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 990 return r; 991 } 992 993 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); 994 995 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 996 997 if (r) { 998 amdgpu_virt_disable_access_debugfs(adev); 999 return r; 1000 } 1001 1002 if (size > valuesize) { 1003 amdgpu_virt_disable_access_debugfs(adev); 1004 return -EINVAL; 1005 } 1006 1007 outsize = 0; 1008 x = 0; 1009 if (!r) { 1010 while (size) { 1011 r = put_user(values[x++], (int32_t *)buf); 1012 buf += 4; 1013 size -= 4; 1014 outsize += 4; 1015 } 1016 } 1017 1018 amdgpu_virt_disable_access_debugfs(adev); 1019 return !r ? outsize : r; 1020 } 1021 1022 /** amdgpu_debugfs_wave_read - Read WAVE STATUS data 1023 * 1024 * @f: open file handle 1025 * @buf: User buffer to store read data in 1026 * @size: Number of bytes to read 1027 * @pos: Offset to seek to 1028 * 1029 * The offset being sought changes which wave that the status data 1030 * will be returned for. The bits are used as follows: 1031 * 1032 * Bits 0..6: Byte offset into data 1033 * Bits 7..14: SE selector 1034 * Bits 15..22: SH/SA selector 1035 * Bits 23..30: CU/{WGP+SIMD} selector 1036 * Bits 31..36: WAVE ID selector 1037 * Bits 37..44: SIMD ID selector 1038 * 1039 * The returned data begins with one DWORD of version information 1040 * Followed by WAVE STATUS registers relevant to the GFX IP version 1041 * being used. See gfx_v8_0_read_wave_data() for an example output. 1042 */ 1043 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, 1044 size_t size, loff_t *pos) 1045 { 1046 struct amdgpu_device *adev = f->f_inode->i_private; 1047 int r, x; 1048 ssize_t result = 0; 1049 uint32_t offset, se, sh, cu, wave, simd, data[32]; 1050 1051 if (size & 3 || *pos & 3) 1052 return -EINVAL; 1053 1054 /* decode offset */ 1055 offset = (*pos & GENMASK_ULL(6, 0)); 1056 se = (*pos & GENMASK_ULL(14, 7)) >> 7; 1057 sh = (*pos & GENMASK_ULL(22, 15)) >> 15; 1058 cu = (*pos & GENMASK_ULL(30, 23)) >> 23; 1059 wave = (*pos & GENMASK_ULL(36, 31)) >> 31; 1060 simd = (*pos & GENMASK_ULL(44, 37)) >> 37; 1061 1062 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1063 if (r < 0) { 1064 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1065 return r; 1066 } 1067 1068 r = amdgpu_virt_enable_access_debugfs(adev); 1069 if (r < 0) { 1070 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1071 return r; 1072 } 1073 1074 /* switch to the specific se/sh/cu */ 1075 mutex_lock(&adev->grbm_idx_mutex); 1076 amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0); 1077 1078 x = 0; 1079 if (adev->gfx.funcs->read_wave_data) 1080 adev->gfx.funcs->read_wave_data(adev, 0, simd, wave, data, &x); 1081 1082 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); 1083 mutex_unlock(&adev->grbm_idx_mutex); 1084 1085 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1086 1087 if (!x) { 1088 amdgpu_virt_disable_access_debugfs(adev); 1089 return -EINVAL; 1090 } 1091 1092 while (size && (offset < x * 4)) { 1093 uint32_t value; 1094 1095 value = data[offset >> 2]; 1096 r = put_user(value, (uint32_t *)buf); 1097 if (r) { 1098 amdgpu_virt_disable_access_debugfs(adev); 1099 return r; 1100 } 1101 1102 result += 4; 1103 buf += 4; 1104 offset += 4; 1105 size -= 4; 1106 } 1107 1108 amdgpu_virt_disable_access_debugfs(adev); 1109 return result; 1110 } 1111 1112 /** amdgpu_debugfs_gpr_read - Read wave gprs 1113 * 1114 * @f: open file handle 1115 * @buf: User buffer to store read data in 1116 * @size: Number of bytes to read 1117 * @pos: Offset to seek to 1118 * 1119 * The offset being sought changes which wave that the status data 1120 * will be returned for. The bits are used as follows: 1121 * 1122 * Bits 0..11: Byte offset into data 1123 * Bits 12..19: SE selector 1124 * Bits 20..27: SH/SA selector 1125 * Bits 28..35: CU/{WGP+SIMD} selector 1126 * Bits 36..43: WAVE ID selector 1127 * Bits 37..44: SIMD ID selector 1128 * Bits 52..59: Thread selector 1129 * Bits 60..61: Bank selector (VGPR=0,SGPR=1) 1130 * 1131 * The return data comes from the SGPR or VGPR register bank for 1132 * the selected operational unit. 1133 */ 1134 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, 1135 size_t size, loff_t *pos) 1136 { 1137 struct amdgpu_device *adev = f->f_inode->i_private; 1138 int r; 1139 ssize_t result = 0; 1140 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; 1141 1142 if (size > 4096 || size & 3 || *pos & 3) 1143 return -EINVAL; 1144 1145 /* decode offset */ 1146 offset = (*pos & GENMASK_ULL(11, 0)) >> 2; 1147 se = (*pos & GENMASK_ULL(19, 12)) >> 12; 1148 sh = (*pos & GENMASK_ULL(27, 20)) >> 20; 1149 cu = (*pos & GENMASK_ULL(35, 28)) >> 28; 1150 wave = (*pos & GENMASK_ULL(43, 36)) >> 36; 1151 simd = (*pos & GENMASK_ULL(51, 44)) >> 44; 1152 thread = (*pos & GENMASK_ULL(59, 52)) >> 52; 1153 bank = (*pos & GENMASK_ULL(61, 60)) >> 60; 1154 1155 data = kcalloc(1024, sizeof(*data), GFP_KERNEL); 1156 if (!data) 1157 return -ENOMEM; 1158 1159 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1160 if (r < 0) 1161 goto err; 1162 1163 r = amdgpu_virt_enable_access_debugfs(adev); 1164 if (r < 0) 1165 goto err; 1166 1167 /* switch to the specific se/sh/cu */ 1168 mutex_lock(&adev->grbm_idx_mutex); 1169 amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0); 1170 1171 if (bank == 0) { 1172 if (adev->gfx.funcs->read_wave_vgprs) 1173 adev->gfx.funcs->read_wave_vgprs(adev, 0, simd, wave, thread, offset, size>>2, data); 1174 } else { 1175 if (adev->gfx.funcs->read_wave_sgprs) 1176 adev->gfx.funcs->read_wave_sgprs(adev, 0, simd, wave, offset, size>>2, data); 1177 } 1178 1179 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); 1180 mutex_unlock(&adev->grbm_idx_mutex); 1181 1182 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1183 1184 while (size) { 1185 uint32_t value; 1186 1187 value = data[result >> 2]; 1188 r = put_user(value, (uint32_t *)buf); 1189 if (r) { 1190 amdgpu_virt_disable_access_debugfs(adev); 1191 goto err; 1192 } 1193 1194 result += 4; 1195 buf += 4; 1196 size -= 4; 1197 } 1198 1199 kfree(data); 1200 amdgpu_virt_disable_access_debugfs(adev); 1201 return result; 1202 1203 err: 1204 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1205 kfree(data); 1206 return r; 1207 } 1208 1209 /** 1210 * amdgpu_debugfs_gfxoff_residency_read - Read GFXOFF residency 1211 * 1212 * @f: open file handle 1213 * @buf: User buffer to store read data in 1214 * @size: Number of bytes to read 1215 * @pos: Offset to seek to 1216 * 1217 * Read the last residency value logged. It doesn't auto update, one needs to 1218 * stop logging before getting the current value. 1219 */ 1220 static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user *buf, 1221 size_t size, loff_t *pos) 1222 { 1223 struct amdgpu_device *adev = file_inode(f)->i_private; 1224 ssize_t result = 0; 1225 int r; 1226 1227 if (size & 0x3 || *pos & 0x3) 1228 return -EINVAL; 1229 1230 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1231 if (r < 0) { 1232 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1233 return r; 1234 } 1235 1236 while (size) { 1237 uint32_t value; 1238 1239 r = amdgpu_get_gfx_off_residency(adev, &value); 1240 if (r) 1241 goto out; 1242 1243 r = put_user(value, (uint32_t *)buf); 1244 if (r) 1245 goto out; 1246 1247 result += 4; 1248 buf += 4; 1249 *pos += 4; 1250 size -= 4; 1251 } 1252 1253 r = result; 1254 out: 1255 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1256 1257 return r; 1258 } 1259 1260 /** 1261 * amdgpu_debugfs_gfxoff_residency_write - Log GFXOFF Residency 1262 * 1263 * @f: open file handle 1264 * @buf: User buffer to write data from 1265 * @size: Number of bytes to write 1266 * @pos: Offset to seek to 1267 * 1268 * Write a 32-bit non-zero to start logging; write a 32-bit zero to stop 1269 */ 1270 static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char __user *buf, 1271 size_t size, loff_t *pos) 1272 { 1273 struct amdgpu_device *adev = file_inode(f)->i_private; 1274 ssize_t result = 0; 1275 int r; 1276 1277 if (size & 0x3 || *pos & 0x3) 1278 return -EINVAL; 1279 1280 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1281 if (r < 0) { 1282 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1283 return r; 1284 } 1285 1286 while (size) { 1287 u32 value; 1288 1289 r = get_user(value, (uint32_t *)buf); 1290 if (r) 1291 goto out; 1292 1293 amdgpu_set_gfx_off_residency(adev, value ? true : false); 1294 1295 result += 4; 1296 buf += 4; 1297 *pos += 4; 1298 size -= 4; 1299 } 1300 1301 r = result; 1302 out: 1303 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1304 1305 return r; 1306 } 1307 1308 1309 /** 1310 * amdgpu_debugfs_gfxoff_count_read - Read GFXOFF entry count 1311 * 1312 * @f: open file handle 1313 * @buf: User buffer to store read data in 1314 * @size: Number of bytes to read 1315 * @pos: Offset to seek to 1316 */ 1317 static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf, 1318 size_t size, loff_t *pos) 1319 { 1320 struct amdgpu_device *adev = file_inode(f)->i_private; 1321 ssize_t result = 0; 1322 int r; 1323 1324 if (size & 0x3 || *pos & 0x3) 1325 return -EINVAL; 1326 1327 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1328 if (r < 0) { 1329 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1330 return r; 1331 } 1332 1333 while (size) { 1334 u64 value = 0; 1335 1336 r = amdgpu_get_gfx_off_entrycount(adev, &value); 1337 if (r) 1338 goto out; 1339 1340 r = put_user(value, (u64 *)buf); 1341 if (r) 1342 goto out; 1343 1344 result += 4; 1345 buf += 4; 1346 *pos += 4; 1347 size -= 4; 1348 } 1349 1350 r = result; 1351 out: 1352 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1353 1354 return r; 1355 } 1356 1357 /** 1358 * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF 1359 * 1360 * @f: open file handle 1361 * @buf: User buffer to write data from 1362 * @size: Number of bytes to write 1363 * @pos: Offset to seek to 1364 * 1365 * Write a 32-bit zero to disable or a 32-bit non-zero to enable 1366 */ 1367 static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf, 1368 size_t size, loff_t *pos) 1369 { 1370 struct amdgpu_device *adev = file_inode(f)->i_private; 1371 ssize_t result = 0; 1372 int r; 1373 1374 if (size & 0x3 || *pos & 0x3) 1375 return -EINVAL; 1376 1377 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1378 if (r < 0) { 1379 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1380 return r; 1381 } 1382 1383 while (size) { 1384 uint32_t value; 1385 1386 r = get_user(value, (uint32_t *)buf); 1387 if (r) 1388 goto out; 1389 1390 amdgpu_gfx_off_ctrl(adev, value ? true : false); 1391 1392 result += 4; 1393 buf += 4; 1394 *pos += 4; 1395 size -= 4; 1396 } 1397 1398 r = result; 1399 out: 1400 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1401 1402 return r; 1403 } 1404 1405 1406 /** 1407 * amdgpu_debugfs_gfxoff_read - read gfxoff status 1408 * 1409 * @f: open file handle 1410 * @buf: User buffer to store read data in 1411 * @size: Number of bytes to read 1412 * @pos: Offset to seek to 1413 */ 1414 static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, 1415 size_t size, loff_t *pos) 1416 { 1417 struct amdgpu_device *adev = file_inode(f)->i_private; 1418 ssize_t result = 0; 1419 int r; 1420 1421 if (size & 0x3 || *pos & 0x3) 1422 return -EINVAL; 1423 1424 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1425 if (r < 0) { 1426 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1427 return r; 1428 } 1429 1430 while (size) { 1431 u32 value = adev->gfx.gfx_off_state; 1432 1433 r = put_user(value, (u32 *)buf); 1434 if (r) 1435 goto out; 1436 1437 result += 4; 1438 buf += 4; 1439 *pos += 4; 1440 size -= 4; 1441 } 1442 1443 r = result; 1444 out: 1445 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1446 1447 return r; 1448 } 1449 1450 static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *buf, 1451 size_t size, loff_t *pos) 1452 { 1453 struct amdgpu_device *adev = file_inode(f)->i_private; 1454 ssize_t result = 0; 1455 int r; 1456 1457 if (size & 0x3 || *pos & 0x3) 1458 return -EINVAL; 1459 1460 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1461 if (r < 0) { 1462 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1463 return r; 1464 } 1465 1466 while (size) { 1467 u32 value; 1468 1469 r = amdgpu_get_gfx_off_status(adev, &value); 1470 if (r) 1471 goto out; 1472 1473 r = put_user(value, (u32 *)buf); 1474 if (r) 1475 goto out; 1476 1477 result += 4; 1478 buf += 4; 1479 *pos += 4; 1480 size -= 4; 1481 } 1482 1483 r = result; 1484 out: 1485 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1486 1487 return r; 1488 } 1489 1490 static const struct file_operations amdgpu_debugfs_regs2_fops = { 1491 .owner = THIS_MODULE, 1492 .unlocked_ioctl = amdgpu_debugfs_regs2_ioctl, 1493 .read = amdgpu_debugfs_regs2_read, 1494 .write = amdgpu_debugfs_regs2_write, 1495 .open = amdgpu_debugfs_regs2_open, 1496 .release = amdgpu_debugfs_regs2_release, 1497 .llseek = default_llseek 1498 }; 1499 1500 static const struct file_operations amdgpu_debugfs_gprwave_fops = { 1501 .owner = THIS_MODULE, 1502 .unlocked_ioctl = amdgpu_debugfs_gprwave_ioctl, 1503 .read = amdgpu_debugfs_gprwave_read, 1504 .open = amdgpu_debugfs_gprwave_open, 1505 .release = amdgpu_debugfs_gprwave_release, 1506 .llseek = default_llseek 1507 }; 1508 1509 static const struct file_operations amdgpu_debugfs_regs_fops = { 1510 .owner = THIS_MODULE, 1511 .read = amdgpu_debugfs_regs_read, 1512 .write = amdgpu_debugfs_regs_write, 1513 .llseek = default_llseek 1514 }; 1515 static const struct file_operations amdgpu_debugfs_regs_didt_fops = { 1516 .owner = THIS_MODULE, 1517 .read = amdgpu_debugfs_regs_didt_read, 1518 .write = amdgpu_debugfs_regs_didt_write, 1519 .llseek = default_llseek 1520 }; 1521 static const struct file_operations amdgpu_debugfs_regs_pcie_fops = { 1522 .owner = THIS_MODULE, 1523 .read = amdgpu_debugfs_regs_pcie_read, 1524 .write = amdgpu_debugfs_regs_pcie_write, 1525 .llseek = default_llseek 1526 }; 1527 static const struct file_operations amdgpu_debugfs_regs_smc_fops = { 1528 .owner = THIS_MODULE, 1529 .read = amdgpu_debugfs_regs_smc_read, 1530 .write = amdgpu_debugfs_regs_smc_write, 1531 .llseek = default_llseek 1532 }; 1533 1534 static const struct file_operations amdgpu_debugfs_gca_config_fops = { 1535 .owner = THIS_MODULE, 1536 .read = amdgpu_debugfs_gca_config_read, 1537 .llseek = default_llseek 1538 }; 1539 1540 static const struct file_operations amdgpu_debugfs_sensors_fops = { 1541 .owner = THIS_MODULE, 1542 .read = amdgpu_debugfs_sensor_read, 1543 .llseek = default_llseek 1544 }; 1545 1546 static const struct file_operations amdgpu_debugfs_wave_fops = { 1547 .owner = THIS_MODULE, 1548 .read = amdgpu_debugfs_wave_read, 1549 .llseek = default_llseek 1550 }; 1551 static const struct file_operations amdgpu_debugfs_gpr_fops = { 1552 .owner = THIS_MODULE, 1553 .read = amdgpu_debugfs_gpr_read, 1554 .llseek = default_llseek 1555 }; 1556 1557 static const struct file_operations amdgpu_debugfs_gfxoff_fops = { 1558 .owner = THIS_MODULE, 1559 .read = amdgpu_debugfs_gfxoff_read, 1560 .write = amdgpu_debugfs_gfxoff_write, 1561 .llseek = default_llseek 1562 }; 1563 1564 static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = { 1565 .owner = THIS_MODULE, 1566 .read = amdgpu_debugfs_gfxoff_status_read, 1567 .llseek = default_llseek 1568 }; 1569 1570 static const struct file_operations amdgpu_debugfs_gfxoff_count_fops = { 1571 .owner = THIS_MODULE, 1572 .read = amdgpu_debugfs_gfxoff_count_read, 1573 .llseek = default_llseek 1574 }; 1575 1576 static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = { 1577 .owner = THIS_MODULE, 1578 .read = amdgpu_debugfs_gfxoff_residency_read, 1579 .write = amdgpu_debugfs_gfxoff_residency_write, 1580 .llseek = default_llseek 1581 }; 1582 1583 static const struct file_operations *debugfs_regs[] = { 1584 &amdgpu_debugfs_regs_fops, 1585 &amdgpu_debugfs_regs2_fops, 1586 &amdgpu_debugfs_gprwave_fops, 1587 &amdgpu_debugfs_regs_didt_fops, 1588 &amdgpu_debugfs_regs_pcie_fops, 1589 &amdgpu_debugfs_regs_smc_fops, 1590 &amdgpu_debugfs_gca_config_fops, 1591 &amdgpu_debugfs_sensors_fops, 1592 &amdgpu_debugfs_wave_fops, 1593 &amdgpu_debugfs_gpr_fops, 1594 &amdgpu_debugfs_gfxoff_fops, 1595 &amdgpu_debugfs_gfxoff_status_fops, 1596 &amdgpu_debugfs_gfxoff_count_fops, 1597 &amdgpu_debugfs_gfxoff_residency_fops, 1598 }; 1599 1600 static const char * const debugfs_regs_names[] = { 1601 "amdgpu_regs", 1602 "amdgpu_regs2", 1603 "amdgpu_gprwave", 1604 "amdgpu_regs_didt", 1605 "amdgpu_regs_pcie", 1606 "amdgpu_regs_smc", 1607 "amdgpu_gca_config", 1608 "amdgpu_sensors", 1609 "amdgpu_wave", 1610 "amdgpu_gpr", 1611 "amdgpu_gfxoff", 1612 "amdgpu_gfxoff_status", 1613 "amdgpu_gfxoff_count", 1614 "amdgpu_gfxoff_residency", 1615 }; 1616 1617 /** 1618 * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide 1619 * register access. 1620 * 1621 * @adev: The device to attach the debugfs entries to 1622 */ 1623 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 1624 { 1625 struct drm_minor *minor = adev_to_drm(adev)->primary; 1626 struct dentry *ent, *root = minor->debugfs_root; 1627 unsigned int i; 1628 1629 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { 1630 ent = debugfs_create_file(debugfs_regs_names[i], 1631 S_IFREG | 0400, root, 1632 adev, debugfs_regs[i]); 1633 if (!i && !IS_ERR_OR_NULL(ent)) 1634 i_size_write(ent->d_inode, adev->rmmio_size); 1635 } 1636 1637 return 0; 1638 } 1639 1640 static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) 1641 { 1642 struct amdgpu_device *adev = m->private; 1643 struct drm_device *dev = adev_to_drm(adev); 1644 int r = 0, i; 1645 1646 r = pm_runtime_get_sync(dev->dev); 1647 if (r < 0) { 1648 pm_runtime_put_autosuspend(dev->dev); 1649 return r; 1650 } 1651 1652 /* Avoid accidently unparking the sched thread during GPU reset */ 1653 r = down_write_killable(&adev->reset_domain->sem); 1654 if (r) 1655 return r; 1656 1657 /* hold on the scheduler */ 1658 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1659 struct amdgpu_ring *ring = adev->rings[i]; 1660 1661 if (!amdgpu_ring_sched_ready(ring)) 1662 continue; 1663 drm_sched_wqueue_stop(&ring->sched); 1664 } 1665 1666 seq_puts(m, "run ib test:\n"); 1667 r = amdgpu_ib_ring_tests(adev); 1668 if (r) 1669 seq_printf(m, "ib ring tests failed (%d).\n", r); 1670 else 1671 seq_puts(m, "ib ring tests passed.\n"); 1672 1673 /* go on the scheduler */ 1674 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1675 struct amdgpu_ring *ring = adev->rings[i]; 1676 1677 if (!amdgpu_ring_sched_ready(ring)) 1678 continue; 1679 drm_sched_wqueue_start(&ring->sched); 1680 } 1681 1682 up_write(&adev->reset_domain->sem); 1683 1684 pm_runtime_put_autosuspend(dev->dev); 1685 1686 return 0; 1687 } 1688 1689 static int amdgpu_debugfs_evict_vram(void *data, u64 *val) 1690 { 1691 struct amdgpu_device *adev = (struct amdgpu_device *)data; 1692 struct drm_device *dev = adev_to_drm(adev); 1693 int r; 1694 1695 r = pm_runtime_get_sync(dev->dev); 1696 if (r < 0) { 1697 pm_runtime_put_autosuspend(dev->dev); 1698 return r; 1699 } 1700 1701 *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); 1702 1703 pm_runtime_put_autosuspend(dev->dev); 1704 1705 return 0; 1706 } 1707 1708 1709 static int amdgpu_debugfs_evict_gtt(void *data, u64 *val) 1710 { 1711 struct amdgpu_device *adev = (struct amdgpu_device *)data; 1712 struct drm_device *dev = adev_to_drm(adev); 1713 int r; 1714 1715 r = pm_runtime_get_sync(dev->dev); 1716 if (r < 0) { 1717 pm_runtime_put_autosuspend(dev->dev); 1718 return r; 1719 } 1720 1721 *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT); 1722 1723 pm_runtime_put_autosuspend(dev->dev); 1724 1725 return 0; 1726 } 1727 1728 static int amdgpu_debugfs_benchmark(void *data, u64 val) 1729 { 1730 struct amdgpu_device *adev = (struct amdgpu_device *)data; 1731 struct drm_device *dev = adev_to_drm(adev); 1732 int r; 1733 1734 r = pm_runtime_get_sync(dev->dev); 1735 if (r < 0) { 1736 pm_runtime_put_autosuspend(dev->dev); 1737 return r; 1738 } 1739 1740 r = amdgpu_benchmark(adev, val); 1741 1742 pm_runtime_put_autosuspend(dev->dev); 1743 1744 return r; 1745 } 1746 1747 static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused) 1748 { 1749 struct amdgpu_device *adev = m->private; 1750 struct drm_device *dev = adev_to_drm(adev); 1751 struct drm_file *file; 1752 int r; 1753 1754 r = mutex_lock_interruptible(&dev->filelist_mutex); 1755 if (r) 1756 return r; 1757 1758 list_for_each_entry(file, &dev->filelist, lhead) { 1759 struct amdgpu_fpriv *fpriv = file->driver_priv; 1760 struct amdgpu_vm *vm = &fpriv->vm; 1761 struct amdgpu_task_info *ti; 1762 1763 ti = amdgpu_vm_get_task_info_vm(vm); 1764 if (ti) { 1765 seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name); 1766 amdgpu_vm_put_task_info(ti); 1767 } 1768 1769 r = amdgpu_bo_reserve(vm->root.bo, true); 1770 if (r) 1771 break; 1772 amdgpu_debugfs_vm_bo_info(vm, m); 1773 amdgpu_bo_unreserve(vm->root.bo); 1774 } 1775 1776 mutex_unlock(&dev->filelist_mutex); 1777 1778 return r; 1779 } 1780 1781 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_test_ib); 1782 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info); 1783 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram, 1784 NULL, "%lld\n"); 1785 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt, 1786 NULL, "%lld\n"); 1787 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_benchmark_fops, NULL, amdgpu_debugfs_benchmark, 1788 "%lld\n"); 1789 1790 static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring, 1791 struct dma_fence **fences) 1792 { 1793 struct amdgpu_fence_driver *drv = &ring->fence_drv; 1794 uint32_t sync_seq, last_seq; 1795 1796 last_seq = atomic_read(&ring->fence_drv.last_seq); 1797 sync_seq = ring->fence_drv.sync_seq; 1798 1799 last_seq &= drv->num_fences_mask; 1800 sync_seq &= drv->num_fences_mask; 1801 1802 do { 1803 struct dma_fence *fence, **ptr; 1804 1805 ++last_seq; 1806 last_seq &= drv->num_fences_mask; 1807 ptr = &drv->fences[last_seq]; 1808 1809 fence = rcu_dereference_protected(*ptr, 1); 1810 RCU_INIT_POINTER(*ptr, NULL); 1811 1812 if (!fence) 1813 continue; 1814 1815 fences[last_seq] = fence; 1816 1817 } while (last_seq != sync_seq); 1818 } 1819 1820 static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences, 1821 int length) 1822 { 1823 int i; 1824 struct dma_fence *fence; 1825 1826 for (i = 0; i < length; i++) { 1827 fence = fences[i]; 1828 if (!fence) 1829 continue; 1830 dma_fence_signal(fence); 1831 dma_fence_put(fence); 1832 } 1833 } 1834 1835 static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched) 1836 { 1837 struct drm_sched_job *s_job; 1838 struct dma_fence *fence; 1839 1840 spin_lock(&sched->job_list_lock); 1841 list_for_each_entry(s_job, &sched->pending_list, list) { 1842 fence = sched->ops->run_job(s_job); 1843 dma_fence_put(fence); 1844 } 1845 spin_unlock(&sched->job_list_lock); 1846 } 1847 1848 static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) 1849 { 1850 struct amdgpu_job *job; 1851 struct drm_sched_job *s_job, *tmp; 1852 uint32_t preempt_seq; 1853 struct dma_fence *fence, **ptr; 1854 struct amdgpu_fence_driver *drv = &ring->fence_drv; 1855 struct drm_gpu_scheduler *sched = &ring->sched; 1856 bool preempted = true; 1857 1858 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX) 1859 return; 1860 1861 preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2)); 1862 if (preempt_seq <= atomic_read(&drv->last_seq)) { 1863 preempted = false; 1864 goto no_preempt; 1865 } 1866 1867 preempt_seq &= drv->num_fences_mask; 1868 ptr = &drv->fences[preempt_seq]; 1869 fence = rcu_dereference_protected(*ptr, 1); 1870 1871 no_preempt: 1872 spin_lock(&sched->job_list_lock); 1873 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { 1874 if (dma_fence_is_signaled(&s_job->s_fence->finished)) { 1875 /* remove job from ring_mirror_list */ 1876 list_del_init(&s_job->list); 1877 sched->ops->free_job(s_job); 1878 continue; 1879 } 1880 job = to_amdgpu_job(s_job); 1881 if (preempted && (&job->hw_fence->base) == fence) 1882 /* mark the job as preempted */ 1883 job->preemption_status |= AMDGPU_IB_PREEMPTED; 1884 } 1885 spin_unlock(&sched->job_list_lock); 1886 } 1887 1888 static int amdgpu_debugfs_ib_preempt(void *data, u64 val) 1889 { 1890 int r, length; 1891 struct amdgpu_ring *ring; 1892 struct dma_fence **fences = NULL; 1893 struct amdgpu_device *adev = (struct amdgpu_device *)data; 1894 1895 if (val >= AMDGPU_MAX_RINGS) 1896 return -EINVAL; 1897 1898 ring = adev->rings[val]; 1899 1900 if (!amdgpu_ring_sched_ready(ring) || 1901 !ring->funcs->preempt_ib) 1902 return -EINVAL; 1903 1904 /* the last preemption failed */ 1905 if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr)) 1906 return -EBUSY; 1907 1908 length = ring->fence_drv.num_fences_mask + 1; 1909 fences = kcalloc(length, sizeof(void *), GFP_KERNEL); 1910 if (!fences) 1911 return -ENOMEM; 1912 1913 /* Avoid accidently unparking the sched thread during GPU reset */ 1914 r = down_read_killable(&adev->reset_domain->sem); 1915 if (r) 1916 goto pro_end; 1917 1918 /* stop the scheduler */ 1919 drm_sched_wqueue_stop(&ring->sched); 1920 1921 /* preempt the IB */ 1922 r = amdgpu_ring_preempt_ib(ring); 1923 if (r) { 1924 DRM_WARN("failed to preempt ring %d\n", ring->idx); 1925 goto failure; 1926 } 1927 1928 amdgpu_fence_process(ring); 1929 1930 if (atomic_read(&ring->fence_drv.last_seq) != 1931 ring->fence_drv.sync_seq) { 1932 DRM_INFO("ring %d was preempted\n", ring->idx); 1933 1934 amdgpu_ib_preempt_mark_partial_job(ring); 1935 1936 /* swap out the old fences */ 1937 amdgpu_ib_preempt_fences_swap(ring, fences); 1938 1939 amdgpu_fence_driver_force_completion(ring); 1940 1941 /* resubmit unfinished jobs */ 1942 amdgpu_ib_preempt_job_recovery(&ring->sched); 1943 1944 /* wait for jobs finished */ 1945 amdgpu_fence_wait_empty(ring); 1946 1947 /* signal the old fences */ 1948 amdgpu_ib_preempt_signal_fences(fences, length); 1949 } 1950 1951 failure: 1952 /* restart the scheduler */ 1953 drm_sched_wqueue_start(&ring->sched); 1954 1955 up_read(&adev->reset_domain->sem); 1956 1957 pro_end: 1958 kfree(fences); 1959 1960 return r; 1961 } 1962 1963 static int amdgpu_debugfs_sclk_set(void *data, u64 val) 1964 { 1965 int ret = 0; 1966 uint32_t max_freq, min_freq; 1967 struct amdgpu_device *adev = (struct amdgpu_device *)data; 1968 1969 if (amdgpu_sriov_multi_vf_mode(adev)) 1970 return -EINVAL; 1971 1972 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1973 if (ret < 0) { 1974 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1975 return ret; 1976 } 1977 1978 ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq); 1979 if (ret == -EOPNOTSUPP) { 1980 ret = 0; 1981 goto out; 1982 } 1983 if (ret || val > max_freq || val < min_freq) { 1984 ret = -EINVAL; 1985 goto out; 1986 } 1987 1988 ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val); 1989 if (ret) 1990 ret = -EINVAL; 1991 1992 out: 1993 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1994 1995 return ret; 1996 } 1997 1998 DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL, 1999 amdgpu_debugfs_ib_preempt, "%llu\n"); 2000 2001 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL, 2002 amdgpu_debugfs_sclk_set, "%llu\n"); 2003 2004 int amdgpu_debugfs_init(struct amdgpu_device *adev) 2005 { 2006 struct dentry *root = adev_to_drm(adev)->primary->debugfs_root; 2007 struct dentry *ent; 2008 int r, i; 2009 2010 if (!debugfs_initialized()) 2011 return 0; 2012 2013 debugfs_create_x32("amdgpu_smu_debug", 0600, root, 2014 &adev->pm.smu_debug_mask); 2015 2016 ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, 2017 &fops_ib_preempt); 2018 if (IS_ERR(ent)) { 2019 DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n"); 2020 return PTR_ERR(ent); 2021 } 2022 2023 ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev, 2024 &fops_sclk_set); 2025 if (IS_ERR(ent)) { 2026 DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n"); 2027 return PTR_ERR(ent); 2028 } 2029 2030 /* Register debugfs entries for amdgpu_ttm */ 2031 amdgpu_ttm_debugfs_init(adev); 2032 amdgpu_debugfs_pm_init(adev); 2033 amdgpu_debugfs_sa_init(adev); 2034 amdgpu_debugfs_fence_init(adev); 2035 amdgpu_debugfs_gem_init(adev); 2036 2037 r = amdgpu_debugfs_regs_init(adev); 2038 if (r) 2039 DRM_ERROR("registering register debugfs failed (%d).\n", r); 2040 2041 amdgpu_debugfs_firmware_init(adev); 2042 amdgpu_ta_if_debugfs_init(adev); 2043 2044 amdgpu_debugfs_mes_event_log_init(adev); 2045 2046 #if defined(CONFIG_DRM_AMD_DC) 2047 if (adev->dc_enabled) 2048 dtn_debugfs_init(adev); 2049 #endif 2050 2051 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 2052 struct amdgpu_ring *ring = adev->rings[i]; 2053 2054 if (!ring) 2055 continue; 2056 2057 amdgpu_debugfs_ring_init(adev, ring); 2058 } 2059 2060 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 2061 if (!amdgpu_vcnfw_log) 2062 break; 2063 2064 if (adev->vcn.harvest_config & (1 << i)) 2065 continue; 2066 2067 amdgpu_debugfs_vcn_fwlog_init(adev, i, &adev->vcn.inst[i]); 2068 } 2069 2070 if (amdgpu_umsch_mm & amdgpu_umsch_mm_fwlog) 2071 amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm); 2072 2073 amdgpu_debugfs_vcn_sched_mask_init(adev); 2074 amdgpu_debugfs_jpeg_sched_mask_init(adev); 2075 amdgpu_debugfs_gfx_sched_mask_init(adev); 2076 amdgpu_debugfs_compute_sched_mask_init(adev); 2077 amdgpu_debugfs_sdma_sched_mask_init(adev); 2078 2079 amdgpu_ras_debugfs_create_all(adev); 2080 amdgpu_rap_debugfs_init(adev); 2081 amdgpu_securedisplay_debugfs_init(adev); 2082 amdgpu_fw_attestation_debugfs_init(adev); 2083 amdgpu_psp_debugfs_init(adev); 2084 2085 debugfs_create_file("amdgpu_evict_vram", 0400, root, adev, 2086 &amdgpu_evict_vram_fops); 2087 debugfs_create_file("amdgpu_evict_gtt", 0400, root, adev, 2088 &amdgpu_evict_gtt_fops); 2089 debugfs_create_file("amdgpu_test_ib", 0400, root, adev, 2090 &amdgpu_debugfs_test_ib_fops); 2091 debugfs_create_file("amdgpu_vm_info", 0444, root, adev, 2092 &amdgpu_debugfs_vm_info_fops); 2093 debugfs_create_file("amdgpu_benchmark", 0200, root, adev, 2094 &amdgpu_benchmark_fops); 2095 2096 adev->debugfs_vbios_blob.data = adev->bios; 2097 adev->debugfs_vbios_blob.size = adev->bios_size; 2098 debugfs_create_blob("amdgpu_vbios", 0444, root, 2099 &adev->debugfs_vbios_blob); 2100 2101 if (adev->discovery.debugfs_blob.size) 2102 debugfs_create_blob("amdgpu_discovery", 0444, root, 2103 &adev->discovery.debugfs_blob); 2104 2105 return 0; 2106 } 2107 2108 static int amdgpu_pt_info_read(struct seq_file *m, void *unused) 2109 { 2110 struct drm_file *file; 2111 struct amdgpu_fpriv *fpriv; 2112 struct amdgpu_bo *root_bo; 2113 struct amdgpu_device *adev; 2114 int r; 2115 2116 file = m->private; 2117 if (!file) 2118 return -EINVAL; 2119 2120 adev = drm_to_adev(file->minor->dev); 2121 fpriv = file->driver_priv; 2122 if (!fpriv || !fpriv->vm.root.bo) 2123 return -ENODEV; 2124 2125 root_bo = amdgpu_bo_ref(fpriv->vm.root.bo); 2126 r = amdgpu_bo_reserve(root_bo, true); 2127 if (r) { 2128 amdgpu_bo_unref(&root_bo); 2129 return -EINVAL; 2130 } 2131 2132 seq_printf(m, "pd_address: 0x%llx\n", amdgpu_gmc_pd_addr(fpriv->vm.root.bo)); 2133 seq_printf(m, "max_pfn: 0x%llx\n", adev->vm_manager.max_pfn); 2134 seq_printf(m, "num_level: 0x%x\n", adev->vm_manager.num_level); 2135 seq_printf(m, "block_size: 0x%x\n", adev->vm_manager.block_size); 2136 seq_printf(m, "fragment_size: 0x%x\n", adev->vm_manager.fragment_size); 2137 2138 amdgpu_bo_unreserve(root_bo); 2139 amdgpu_bo_unref(&root_bo); 2140 2141 return 0; 2142 } 2143 2144 static int amdgpu_pt_info_open(struct inode *inode, struct file *file) 2145 { 2146 return single_open(file, amdgpu_pt_info_read, inode->i_private); 2147 } 2148 2149 static const struct file_operations amdgpu_pt_info_fops = { 2150 .owner = THIS_MODULE, 2151 .open = amdgpu_pt_info_open, 2152 .read = seq_read, 2153 .llseek = seq_lseek, 2154 .release = single_release, 2155 }; 2156 2157 void amdgpu_debugfs_vm_init(struct drm_file *file) 2158 { 2159 debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file, 2160 &amdgpu_pt_info_fops); 2161 } 2162 2163 #else 2164 int amdgpu_debugfs_init(struct amdgpu_device *adev) 2165 { 2166 return 0; 2167 } 2168 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 2169 { 2170 return 0; 2171 } 2172 void amdgpu_debugfs_vm_init(struct drm_file *file) 2173 { 2174 } 2175 #endif 2176