/* /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c (revision e3b9f1e81de2083f359bacd2a94bf1c024f2ede0) */
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <drm/drmP.h>
#include <linux/debugfs.h>
#include "amdgpu.h"

/*
 * Debugfs
 */
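/*
 * amdgpu_debugfs_add_files - register a block of drm_info_list entries
 *
 * Records the array in adev->debugfs[] (silently succeeding if the same
 * array was already registered) and, when CONFIG_DEBUG_FS is enabled,
 * creates the corresponding files under the DRM primary minor's debugfs
 * directory.  Returns 0 on success or -EINVAL once
 * AMDGPU_DEBUGFS_MAX_COMPONENTS would be exceeded.
 */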
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report this so we can increase AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

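/*
 * amdgpu_regs MMIO register file offset layout (derived from the decode
 * below):
 *
 *   bits 21:0  - byte offset into the MMIO aperture (register index * 4)
 *   bit  23    - take adev->pm.mutex around the access (PG-sensitive regs)
 *   bit  62    - apply an SE/SH/instance GRBM bank selection, using
 *   bits 33:24 - shader engine (SE) index
 *   bits 43:34 - shader array (SH) index
 *   bits 53:44 - instance index
 *                (0x3FF in any bank field means "broadcast/all")
 *
 * Accesses must be 4-byte sized and 4-byte aligned.  A minimal userspace
 * sketch (path assumed to be /sys/kernel/debug/dri/<minor>/amdgpu_regs):
 *
 *   uint32_t val;
 *   pread(fd, &val, 4, (loff_t)reg_index * 4);   // read one register
 *   pwrite(fd, &val, 4, (loff_t)reg_index * 4);  // write it back
 */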
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else {
		use_bank = false;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		value = RREG32(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

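/*
 * Write-side counterpart of amdgpu_debugfs_regs_read(); the file offset is
 * interpreted with the same bit layout described above.
 */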
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we writing registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else {
		use_bank = false;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		WREG32(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	/* undo the bank selection and drop any locks on every exit path */
end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

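/*
 * The next three pairs of handlers expose the PCIE, DIDT and SMC indirect
 * register spaces.  PCIE and DIDT offsets are byte addresses of 32-bit
 * registers (the handlers shift them down to a dword index), while the SMC
 * file passes the byte address through to RREG32_SMC()/WREG32_SMC()
 * unchanged.
 */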
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

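/*
 * Dump a versioned snapshot of the GCA (graphics core) configuration.  The
 * first dword is a format version; new fields are only ever appended, and
 * the version is bumped whenever that happens so userspace can tell which
 * fields are present.
 */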
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}

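/*
 * Read a power/thermal sensor through the powerplay interface.  The file
 * offset divided by four selects the sensor id passed to
 * amdgpu_dpm_read_sensor(); the result is one or more 32-bit values.
 */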
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}

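/*
 * amdgpu_wave file offset layout (derived from the decode below):
 *
 *   bits  6:0  - byte offset into the returned wave status block
 *   bits 14:7  - shader engine (SE)
 *   bits 22:15 - shader array (SH)
 *   bits 30:23 - compute unit (CU)
 *   bits 36:31 - wave id
 *   bits 44:37 - SIMD id
 */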
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}

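/*
 * amdgpu_gpr file offset layout (derived from the decode below):
 *
 *   bits 11:0  - start offset within the selected GPR bank (dword aligned)
 *   bits 19:12 - shader engine (SE)
 *   bits 27:20 - shader array (SH)
 *   bits 35:28 - compute unit (CU)
 *   bits 43:36 - wave id
 *   bits 51:44 - SIMD id
 *   bits 59:52 - thread id (VGPRs only)
 *   bits 61:60 - bank: 0 selects VGPRs, anything else selects SGPRs
 */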
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	/* the data buffer below holds 1024 dwords, so cap a single read */
	if (size > 4096 || size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	/* the callbacks fill data[] from index 0, so copy out from there */
	while (size) {
		uint32_t value;

		value = data[result >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};

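/*
 * amdgpu_debugfs_regs_init - create the register/wave/sensor debugfs files
 *
 * Creates one file per entry in debugfs_regs_names[]/debugfs_regs[] under
 * the DRM primary minor's debugfs directory and remembers the dentries so
 * amdgpu_debugfs_regs_cleanup() can remove them again.  On failure the
 * files created so far are removed and the error is returned.
 */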
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			/* unwind the files created so far */
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}

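/*
 * amdgpu_test_ib: park every ring's scheduler thread so nothing else
 * submits work, run the IB ring tests, report the result, then unpark the
 * schedulers again.
 */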
static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0, i;

	/* stop the scheduler threads while the tests run */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_puts(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_puts(m, "ib ring tests passed.\n");

	/* restart the scheduler threads */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	return 0;
}

static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_write(m, adev->bios, adev->bios_size);
	return 0;
}

static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_list[] = {
	{"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
	{"amdgpu_test_ib", amdgpu_debugfs_test_ib},
	{"amdgpu_evict_vram", amdgpu_debugfs_evict_vram}
};

int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
					ARRAY_SIZE(amdgpu_debugfs_list));
}

#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif