// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/fault-inject.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_debugfs.h"
#include "disp/msm_disp_snapshot.h"

/*
 * GPU Snapshot:
 */

struct msm_gpu_show_priv {
	struct msm_gpu_state *state;
	struct drm_device *dev;
};

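/* Print the GPU state snapshot captured when the "gpu" file was opened. */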
static int msm_gpu_show(struct seq_file *m, void *arg)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct msm_gpu_show_priv *show_priv = m->private;
	struct msm_drm_private *priv = show_priv->dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	int ret;

	ret = mutex_lock_interruptible(&gpu->lock);
	if (ret)
		return ret;

	drm_printf(&p, "%s Status:\n", gpu->name);
	gpu->funcs->show(gpu, show_priv->state, &p);

	mutex_unlock(&gpu->lock);

	return 0;
}

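/* Drop the snapshot reference taken at open time, then release the seq_file. */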
static int msm_gpu_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct msm_gpu_show_priv *show_priv = m->private;
	struct msm_drm_private *priv = show_priv->dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	mutex_lock(&gpu->lock);
	gpu->funcs->gpu_state_put(show_priv->state);
	mutex_unlock(&gpu->lock);

	kfree(show_priv);

	return single_release(inode, file);
}

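/*
 * Capture a fresh GPU state snapshot.  The GPU is powered up and
 * (re)initialized first so that ->gpu_state_get() can read the hardware
 * state.
 */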
static int msm_gpu_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_gpu_show_priv *show_priv;
	int ret;

	if (!gpu || !gpu->funcs->gpu_state_get)
		return -ENODEV;

	show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
	if (!show_priv)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&gpu->lock);
	if (ret)
		goto free_priv;

	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_hw_init(gpu);
	show_priv->state = gpu->funcs->gpu_state_get(gpu);
	pm_runtime_put_sync(&gpu->pdev->dev);

	mutex_unlock(&gpu->lock);

	if (IS_ERR(show_priv->state)) {
		ret = PTR_ERR(show_priv->state);
		goto free_priv;
	}

	show_priv->dev = dev;

	ret = single_open(file, msm_gpu_show, show_priv);
	if (ret)
		goto free_priv;

	return 0;

free_priv:
	kfree(show_priv);
	return ret;
}

static const struct file_operations msm_gpu_fops = {
	.owner = THIS_MODULE,
	.open = msm_gpu_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = msm_gpu_release,
};

/*
 * Display Snapshot:
 */

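/* Print the display state snapshot captured when the "kms" file was opened. */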
static int msm_kms_show(struct seq_file *m, void *arg)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct msm_disp_state *state = m->private;

	msm_disp_state_print(state, &p);

	return 0;
}

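/* Free the snapshot captured at open time, then release the seq_file. */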
static int msm_kms_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct msm_disp_state *state = m->private;

	msm_disp_state_free(state);

	return single_release(inode, file);
}

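/*
 * Capture a snapshot of the current display state, serialized against
 * other dump paths by the kms dump_mutex.
 */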
static int msm_kms_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_disp_state *state;
	int ret;

	if (!priv->kms)
		return -ENODEV;

	ret = mutex_lock_interruptible(&priv->kms->dump_mutex);
	if (ret)
		return ret;

	state = msm_disp_snapshot_state_sync(priv->kms);

	mutex_unlock(&priv->kms->dump_mutex);

	if (IS_ERR(state))
		return PTR_ERR(state);

	ret = single_open(file, msm_kms_show, state);
	if (ret) {
		msm_disp_state_free(state);
		return ret;
	}

	return 0;
}

static const struct file_operations msm_kms_fops = {
	.owner = THIS_MODULE,
	.open = msm_kms_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = msm_kms_release,
};

/*
 * Other debugfs:
 */

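/*
 * The "shrink" file is a test knob for the GEM shrinker: writing a value
 * runs a shrinker scan with that value as the reclaim target, and a
 * subsequent read returns how much the last scan freed.
 *
 * Hypothetical usage, assuming debugfs is mounted at /sys/kernel/debug and
 * the device is DRM minor 0:
 *
 *   echo 32 > /sys/kernel/debug/dri/0/shrink
 *   cat /sys/kernel/debug/dri/0/shrink
 */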
static unsigned long last_shrink_freed;

static int
shrink_get(void *data, u64 *val)
{
	*val = last_shrink_freed;

	return 0;
}

static int
shrink_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	last_shrink_freed = msm_gem_shrinker_shrink(dev, val);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(shrink_fops,
			 shrink_get, shrink_set,
			 "0x%08llx\n");

/*
 * Return the number of microseconds to wait until stall-on-fault is
 * re-enabled.  If 0, then it is already enabled or will be re-enabled on
 * the next submit (unless there's a leftover devcoredump).  This is useful
 * for kernel tests that intentionally produce a fault and check the
 * devcoredump, so that they can wait until the cooldown period is over
 * before continuing.
 */

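/*
 * Hypothetical usage from a test, assuming debugfs is mounted at
 * /sys/kernel/debug and the device is DRM minor 0:
 *
 *   until [ "$(cat /sys/kernel/debug/dri/0/stall_reenable_time_us)" = 0 ]; do
 *           sleep 0.01
 *   done
 */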
static int
stall_reenable_time_get(void *data, u64 *val)
{
	struct msm_drm_private *priv = data;
	unsigned long irq_flags;

	spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);

	if (priv->stall_enabled)
		*val = 0;
	else
		*val = max(ktime_us_delta(priv->stall_reenable_time, ktime_get()), 0);

	spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(stall_reenable_time_fops,
			 stall_reenable_time_get, NULL,
			 "%lld\n");

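/* Describe all GEM objects tracked by the driver, under the obj_lock. */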
static int msm_gem_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&priv->obj_lock);
	if (ret)
		return ret;

	msm_gem_describe_objects(&priv->objects, m);

	mutex_unlock(&priv->obj_lock);

	return 0;
}

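/* Dump the drm_mm state of the mmap offset address space. */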
static int msm_mm_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);

	return 0;
}

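/* Describe the fbdev framebuffer (if any), then all userspace framebuffers. */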
static int msm_fb_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_framebuffer *fb, *fbdev_fb = NULL;

	if (dev->fb_helper && dev->fb_helper->fb) {
		seq_printf(m, "fbcon ");
		fbdev_fb = dev->fb_helper->fb;
		msm_framebuffer_describe(fbdev_fb, m);
	}

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user ");
		msm_framebuffer_describe(fb, m);
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static struct drm_info_list msm_debugfs_list[] = {
	{ "gem", msm_gem_show },
	{ "mm", msm_mm_show },
};

static struct drm_info_list msm_kms_debugfs_list[] = {
	{ "fb", msm_fb_show },
};

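/* Install the per-minor "rd" and "perf" debugfs files. */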
static int late_init_minor(struct drm_minor *minor)
{
	int ret;

	if (!minor)
		return 0;

	ret = msm_rd_debugfs_init(minor);
	if (ret) {
		DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
		return ret;
	}

	ret = msm_perf_debugfs_init(minor);
	if (ret) {
		DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
		return ret;
	}

	return 0;
}

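/* Install late-initialized debugfs files on the primary and render minors. */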
int msm_debugfs_late_init(struct drm_device *dev)
{
	int ret;

	ret = late_init_minor(dev->primary);
	if (ret)
		return ret;

	return late_init_minor(dev->render);
}

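/*
 * GPU-related debugfs: the snapshot file, hangcheck and fault-handling
 * knobs, and the devfreq tunables.
 */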
static void msm_debugfs_gpu_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct dentry *gpu_devfreq;

	debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
		dev, &msm_gpu_fops);

	debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
		&priv->hangcheck_period);

	debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
		&priv->disable_err_irq);

	debugfs_create_file("stall_reenable_time_us", 0400, minor->debugfs_root,
		priv, &stall_reenable_time_fops);

	gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);

	debugfs_create_bool("idle_clamp", 0600, gpu_devfreq,
		&priv->gpu_clamp_to_idle);

	debugfs_create_u32("upthreshold", 0600, gpu_devfreq,
		&priv->gpu_devfreq_config.upthreshold);

	debugfs_create_u32("downdifferential", 0600, gpu_devfreq,
		&priv->gpu_devfreq_config.downdifferential);
}

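/* Main debugfs entry point, called by the DRM core for each minor. */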
void msm_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct msm_drm_private *priv = dev->dev_private;

	drm_debugfs_create_files(msm_debugfs_list,
				 ARRAY_SIZE(msm_debugfs_list),
				 minor->debugfs_root, minor);

	if (priv->gpu_pdev)
		msm_debugfs_gpu_init(minor);

	if (priv->kms) {
		drm_debugfs_create_files(msm_kms_debugfs_list,
					 ARRAY_SIZE(msm_kms_debugfs_list),
					 minor->debugfs_root, minor);
		debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
			dev, &msm_kms_fops);
	}

	debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
			    dev, &shrink_fops);

	if (priv->kms && priv->kms->funcs->debugfs_init)
		priv->kms->funcs->debugfs_init(priv->kms, minor);

	fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
				  &fail_gem_alloc);
	fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
				  &fail_gem_iova);
}

#endif