/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <drm/drmP.h>
#include <linux/debugfs.h>
#include "amdgpu.h"

/*
 * Debugfs
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

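/*
 * MMIO register access through the "amdgpu_regs" debugfs file.
 *
 * The handler below treats the file offset as a small command word in
 * addition to the register address.  The register byte offset lives in
 * the low 22 bits and must be dword aligned; the remaining bits select
 * optional addressing modes:
 *
 *   bits  0..21  byte offset of the MMIO register
 *   bit     23   take the PM mutex around the access (PG-protected regs)
 *   bit     62   bank mode: bits 24..33 = SE, 34..43 = SH,
 *                44..53 = instance (a field value of 0x3FF means "all")
 *   bit     61   ring mode: bits 24..33 = ME, 34..43 = pipe,
 *                44..53 = queue
 *
 * Setting both bit 61 and bit 62 is rejected with -EINVAL.  As a rough
 * usage sketch only (the debugfs path and DRM minor number depend on
 * the system, and access normally requires root), a single register
 * at byte offset OFFSET could be read from userspace with:
 *
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_regs bs=4 count=1 skip=$((OFFSET / 4)) | xxd
 */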
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
		char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank, use_ring;
	unsigned instance_bank, sh_bank, se_bank, me, pipe, queue;

	pm_pg_lock = use_bank = use_ring = false;
	instance_bank = sh_bank = se_bank = me = pipe = queue = 0;

	if (size & 0x3 || *pos & 0x3 ||
			((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else if (*pos & (1ULL << 61)) {

		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;

		use_ring = 1;
	} else {
		use_bank = use_ring = 0;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	} else if (use_ring) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		if (read) {
			value = RREG32(*pos >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				WREG32(*pos >> 2, value);
		}
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	} else if (use_ring) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}


static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}

static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
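/*
 * The next three pairs of handlers are dword-at-a-time windows into
 * the indirect register spaces (PCIE, DIDT and SMC).  Note the
 * difference in addressing: the PCIE and DIDT accessors take the dword
 * index (*pos >> 2), while the SMC accessor is passed the byte address
 * in *pos directly.  All of them require 4-byte aligned offset and size.
 */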
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
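/*
 * "amdgpu_gca_config" dumps the graphics configuration as an array of
 * 32-bit words.  The first word is a format version (currently 3); the
 * /* rev==N */ markers below show where fields were appended for each
 * version bump, so userspace can read as much as it understands.
 */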
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
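/*
 * "amdgpu_sensors" exposes the powerplay sensor readings.  The file
 * offset divided by four selects the sensor index (presumably one of
 * the powerplay AMDGPU_PP_SENSOR_* values); a sensor may report more
 * than one dword, so the requested size must not exceed what the
 * sensor returns.
 */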
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}
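/*
 * "amdgpu_wave" returns the status words of a single wavefront.  The
 * wave is addressed through the file offset:
 *
 *   bits  0..6   byte offset into the returned dwords
 *   bits  7..14  SE
 *   bits 15..22  SH
 *   bits 23..30  CU
 *   bits 31..36  wave id
 *   bits 37..44  SIMD id
 */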
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}
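/*
 * "amdgpu_gpr" reads the general purpose registers of a wavefront.
 * The register file is addressed through the file offset:
 *
 *   bits  0..11  register offset
 *   bits 12..19  SE
 *   bits 20..27  SH
 *   bits 28..35  CU
 *   bits 36..43  wave id
 *   bits 44..51  SIMD id
 *   bits 52..59  thread id (VGPRs only)
 *   bits 60..61  bank: 0 selects VGPRs, 1 selects SGPRs
 */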
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = *pos & GENMASK_ULL(11, 0);
	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		value = data[offset++];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};

int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			/* clean up the entries created so far */
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}

static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0, i;

	/* hold on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_printf(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_printf(m, "ib ring tests passed.\n");

	/* go on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	return 0;
}

static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_write(m, adev->bios, adev->bios_size);
	return 0;
}

static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
	return 0;
}

static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_list[] = {
	{"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
	{"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
	{"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
};

int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
					ARRAY_SIZE(amdgpu_debugfs_list));
}

#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif