// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/fault-inject.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_debugfs.h"
#include "disp/msm_disp_snapshot.h"

/*
 * GPU Snapshot:
 */

struct msm_gpu_show_priv {
	struct msm_gpu_state *state;
	struct drm_device *dev;
};

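/*
 * seq_file show callback for the "gpu" debugfs file: formats the GPU state
 * snapshot captured at open time via the per-GPU ->show() callback, holding
 * gpu->lock around the dump.
 */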
static int msm_gpu_show(struct seq_file *m, void *arg)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct msm_gpu_show_priv *show_priv = m->private;
	struct msm_drm_private *priv = show_priv->dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	int ret;

	ret = mutex_lock_interruptible(&gpu->lock);
	if (ret)
		return ret;

	drm_printf(&p, "%s Status:\n", gpu->name);
	gpu->funcs->show(gpu, show_priv->state, &p);

	mutex_unlock(&gpu->lock);

	return 0;
}

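/*
 * Release callback for the "gpu" debugfs file: drops the GPU state reference
 * taken at open time (under gpu->lock) and frees the show_priv wrapper.
 */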
static int msm_gpu_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct msm_gpu_show_priv *show_priv = m->private;
	struct msm_drm_private *priv = show_priv->dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	mutex_lock(&gpu->lock);
	gpu->funcs->gpu_state_put(show_priv->state);
	mutex_unlock(&gpu->lock);

	kfree(show_priv);

	return single_release(inode, file);
}

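/*
 * Open callback for the "gpu" debugfs file: powers up the GPU, makes sure the
 * hardware is initialized, and captures a state snapshot via
 * ->gpu_state_get().  Subsequent reads of the file only format this captured
 * state.
 */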
static int msm_gpu_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_gpu_show_priv *show_priv;
	int ret;

	if (!gpu || !gpu->funcs->gpu_state_get)
		return -ENODEV;

	show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
	if (!show_priv)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&gpu->lock);
	if (ret)
		goto free_priv;

	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_hw_init(gpu);
	show_priv->state = gpu->funcs->gpu_state_get(gpu);
	pm_runtime_put_sync(&gpu->pdev->dev);

	mutex_unlock(&gpu->lock);

	if (IS_ERR(show_priv->state)) {
		ret = PTR_ERR(show_priv->state);
		goto free_priv;
	}

	show_priv->dev = dev;

	ret = single_open(file, msm_gpu_show, show_priv);
	if (ret)
		goto free_priv;

	return 0;

free_priv:
	kfree(show_priv);
	return ret;
}

static const struct file_operations msm_gpu_fops = {
	.owner = THIS_MODULE,
	.open = msm_gpu_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = msm_gpu_release,
};

/*
 * Display Snapshot:
 */

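/*
 * seq_file show callback for the "kms" debugfs file: prints the display state
 * snapshot captured at open time.
 */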
static int msm_kms_show(struct seq_file *m, void *arg)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct msm_disp_state *state = m->private;

	msm_disp_state_print(state, &p);

	return 0;
}

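/*
 * Release callback for the "kms" debugfs file: frees the display state
 * snapshot captured at open time.
 */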
static int msm_kms_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct msm_disp_state *state = m->private;

	msm_disp_state_free(state);

	return single_release(inode, file);
}

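/*
 * Open callback for the "kms" debugfs file: synchronously captures a display
 * state snapshot under dump_mutex and attaches it to the seq_file.
 */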
static int msm_kms_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_disp_state *state;
	int ret;

	if (!priv->kms)
		return -ENODEV;

	ret = mutex_lock_interruptible(&priv->kms->dump_mutex);
	if (ret)
		return ret;

	state = msm_disp_snapshot_state_sync(priv->kms);

	mutex_unlock(&priv->kms->dump_mutex);

	if (IS_ERR(state))
		return PTR_ERR(state);

	ret = single_open(file, msm_kms_show, state);
	if (ret) {
		msm_disp_state_free(state);
		return ret;
	}

	return 0;
}

static const struct file_operations msm_kms_fops = {
	.owner = THIS_MODULE,
	.open = msm_kms_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = msm_kms_release,
};

/*
 * Other debugfs:
 */

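/*
 * "shrink" debugfs file: writing a value invokes the GEM shrinker with that
 * value as its reclaim target and records how much it freed; reading reports
 * the result of the last write.  From userspace this would typically look
 * like (the debugfs path may vary by DRM minor):
 *
 *   echo 0xffffffff > /sys/kernel/debug/dri/0/shrink
 *   cat /sys/kernel/debug/dri/0/shrink
 */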
static unsigned long last_shrink_freed;

static int
shrink_get(void *data, u64 *val)
{
	*val = last_shrink_freed;

	return 0;
}

static int
shrink_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	last_shrink_freed = msm_gem_shrinker_shrink(dev, val);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(shrink_fops,
			 shrink_get, shrink_set,
			 "0x%08llx\n");

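/*
 * "gem" debugfs entry: describes all GEM objects tracked in priv->objects,
 * taking obj_lock so the list stays stable while it is walked.
 */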
static int msm_gem_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&priv->obj_lock);
	if (ret)
		return ret;

	msm_gem_describe_objects(&priv->objects, m);

	mutex_unlock(&priv->obj_lock);

	return 0;
}

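/*
 * "mm" debugfs entry: dumps the DRM VMA offset manager's address space
 * (mmap offset allocations) via drm_mm_print().
 */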
static int msm_mm_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);

	return 0;
}

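/*
 * "fb" debugfs entry: describes the fbdev framebuffer (if any) followed by
 * all user-created framebuffers on the device.
 */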
static int msm_fb_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_framebuffer *fb, *fbdev_fb = NULL;

	if (dev->fb_helper && dev->fb_helper->fb) {
		seq_printf(m, "fbcon ");
		fbdev_fb = dev->fb_helper->fb;
		msm_framebuffer_describe(fbdev_fb, m);
	}

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user ");
		msm_framebuffer_describe(fb, m);
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static struct drm_info_list msm_debugfs_list[] = {
	{ "gem", msm_gem_show },
	{ "mm", msm_mm_show },
};

static struct drm_info_list msm_kms_debugfs_list[] = {
	{ "fb", msm_fb_show },
};

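/*
 * Per-minor late init: registers the rd and perf debugfs entries for one DRM
 * minor.  A NULL minor (e.g. no render node) is silently skipped.
 */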
static int late_init_minor(struct drm_minor *minor)
{
	int ret;

	if (!minor)
		return 0;

	ret = msm_rd_debugfs_init(minor);
	if (ret) {
		DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
		return ret;
	}

	ret = msm_perf_debugfs_init(minor);
	if (ret) {
		DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
		return ret;
	}

	return 0;
}

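/*
 * Finish debugfs setup (rd/perf entries) on both the primary and render
 * minors.
 */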
int msm_debugfs_late_init(struct drm_device *dev)
{
	int ret;

	ret = late_init_minor(dev->primary);
	if (ret)
		return ret;

	ret = late_init_minor(dev->render);
	return ret;
}

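/*
 * GPU-specific debugfs: the "gpu" state snapshot file plus hangcheck,
 * error-IRQ and devfreq tuning knobs.
 */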
static void msm_debugfs_gpu_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct dentry *gpu_devfreq;

	debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
		dev, &msm_gpu_fops);

	debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
		&priv->hangcheck_period);

	debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
		&priv->disable_err_irq);

	gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);

	debugfs_create_bool("idle_clamp", 0600, gpu_devfreq,
		&priv->gpu_clamp_to_idle);

	debugfs_create_u32("upthreshold", 0600, gpu_devfreq,
		&priv->gpu_devfreq_config.upthreshold);

	debugfs_create_u32("downdifferential", 0600, gpu_devfreq,
		&priv->gpu_devfreq_config.downdifferential);
}

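/*
 * Main debugfs entry point, called per DRM minor: installs the common gem/mm
 * entries, the GPU and KMS files when present, the shrinker control and the
 * fault-injection attributes.
 */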
void msm_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct msm_drm_private *priv = dev->dev_private;

	drm_debugfs_create_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list),
			minor->debugfs_root, minor);

	if (priv->gpu_pdev)
		msm_debugfs_gpu_init(minor);

	if (priv->kms) {
		drm_debugfs_create_files(msm_kms_debugfs_list,
				ARRAY_SIZE(msm_kms_debugfs_list),
				minor->debugfs_root, minor);
		debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
			dev, &msm_kms_fops);
	}

	debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
		dev, &shrink_fops);

	if (priv->kms && priv->kms->funcs->debugfs_init)
		priv->kms->funcs->debugfs_init(priv->kms, minor);

	fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
				  &fail_gem_alloc);
	fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
				  &fail_gem_iova);
}
#endif /* CONFIG_DEBUG_FS */