// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/debugfs.h>
#include <linux/fault-inject.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include <uapi/drm/ivpu_accel.h>

#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"

static inline struct ivpu_device *seq_to_ivpu(struct seq_file *s)
{
	struct drm_debugfs_entry *entry = s->private;

	return to_ivpu_device(entry->dev);
}

static int bo_list_show(struct seq_file *s, void *v)
{
	struct drm_printer p = drm_seq_file_printer(s);
	struct ivpu_device *vdev = seq_to_ivpu(s);

	ivpu_bo_list(&vdev->drm, &p);

	return 0;
}

static int fw_name_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);

	seq_printf(s, "%s\n", vdev->fw->name);
	return 0;
}

static int fw_version_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);

	seq_printf(s, "%s\n", vdev->fw->version);
	return 0;
}

static int fw_trace_capability_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);
	u64 trace_hw_component_mask;
	u32 trace_destination_mask;
	int ret;

	ret = ivpu_jsm_trace_get_capability(vdev, &trace_destination_mask,
					    &trace_hw_component_mask);
	if (!ret) {
		seq_printf(s,
			   "trace_destination_mask: %#18x\n"
			   "trace_hw_component_mask: %#18llx\n",
			   trace_destination_mask, trace_hw_component_mask);
	}
	return 0;
}

static int fw_trace_config_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);
	/*
	 * WA: VPU_JSM_MSG_TRACE_GET_CONFIG command is not working yet,
	 * so we use values from vdev->fw instead of calling ivpu_jsm_trace_get_config()
	 */
	u32 trace_level = vdev->fw->trace_level;
	u32 trace_destination_mask = vdev->fw->trace_destination_mask;
	u64 trace_hw_component_mask = vdev->fw->trace_hw_component_mask;

	seq_printf(s,
		   "trace_level: %#18x\n"
		   "trace_destination_mask: %#18x\n"
		   "trace_hw_component_mask: %#18llx\n",
		   trace_level, trace_destination_mask, trace_hw_component_mask);

	return 0;
}

static int last_bootmode_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);

	seq_printf(s, "%s\n", (vdev->pm->is_warmboot) ? "warmboot" : "coldboot");

	return 0;
}

static int reset_counter_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);

	seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_counter));
	return 0;
}

static int reset_pending_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);

	seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_pending));
	return 0;
}

static int firewall_irq_counter_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);

	seq_printf(s, "%d\n", atomic_read(&vdev->hw->firewall_irq_counter));
	return 0;
}

static const struct drm_debugfs_info vdev_debugfs_list[] = {
	{"bo_list", bo_list_show, 0},
	{"fw_name", fw_name_show, 0},
	{"fw_version", fw_version_show, 0},
	{"fw_trace_capability", fw_trace_capability_show, 0},
	{"fw_trace_config", fw_trace_config_show, 0},
	{"last_bootmode", last_bootmode_show, 0},
	{"reset_counter", reset_counter_show, 0},
	{"reset_pending", reset_pending_show, 0},
	{"firewall_irq_counter", firewall_irq_counter_show, 0},
};

static int dvfs_mode_get(void *data, u64 *dvfs_mode)
{
	struct ivpu_device *vdev = (struct ivpu_device *)data;

	*dvfs_mode = vdev->fw->dvfs_mode;
	return 0;
}

static int dvfs_mode_set(void *data, u64 dvfs_mode)
{
	struct ivpu_device *vdev = (struct ivpu_device *)data;

	vdev->fw->dvfs_mode = (u32)dvfs_mode;
	return pci_try_reset_function(to_pci_dev(vdev->drm.dev));
}

DEFINE_DEBUGFS_ATTRIBUTE(dvfs_mode_fops, dvfs_mode_get, dvfs_mode_set, "%llu\n");
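/*
 * Example usage of the dvfs_mode attribute (sketch; the debugfs path assumes
 * the standard accel debugfs root and the mode values are firmware-defined):
 *
 *   cat /sys/kernel/debug/accel/<n>/dvfs_mode
 *   echo 0 > /sys/kernel/debug/accel/<n>/dvfs_mode
 *
 * A write stores the new mode in vdev->fw->dvfs_mode and then resets the PCI
 * function so the firmware reboots with the updated value.
 */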

static ssize_t
fw_dyndbg_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	char buffer[VPU_DYNDBG_CMD_MAX_LEN] = {};
	int ret;

	if (size >= VPU_DYNDBG_CMD_MAX_LEN)
		return -EINVAL;

	ret = strncpy_from_user(buffer, user_buf, size);
	if (ret < 0)
		return ret;

	ivpu_jsm_dyndbg_control(vdev, buffer, size);
	return size;
}

static const struct file_operations fw_dyndbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = fw_dyndbg_fops_write,
};
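/*
 * fw_dyndbg is write-only: the written string is forwarded verbatim to the
 * firmware via ivpu_jsm_dyndbg_control(). The command syntax is defined by the
 * firmware, so the placeholder below is only an illustration of the interface:
 *
 *   echo "<fw dyndbg command>" > /sys/kernel/debug/accel/<n>/fw_dyndbg
 */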

static int fw_log_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = s->private;
	struct drm_printer p = drm_seq_file_printer(s);

	ivpu_fw_log_print(vdev, true, &p);
	return 0;
}

static int fw_log_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, fw_log_show, inode->i_private);
}

static ssize_t
fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
	struct seq_file *s = file->private_data;
	struct ivpu_device *vdev = s->private;

	if (!size)
		return -EINVAL;

	ivpu_fw_log_mark_read(vdev);
	return size;
}

static const struct file_operations fw_log_fops = {
	.owner = THIS_MODULE,
	.open = fw_log_fops_open,
	.write = fw_log_fops_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
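/*
 * fw_log usage (sketch, assuming the standard accel debugfs root):
 *
 *   cat /sys/kernel/debug/accel/<n>/fw_log       # dump the firmware log
 *   echo 1 > /sys/kernel/debug/accel/<n>/fw_log  # mark the current log as read
 *
 * Any non-empty write marks the log as read; the written value itself is ignored.
 */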

static ssize_t
fw_profiling_freq_fops_write(struct file *file, const char __user *user_buf,
			     size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(user_buf, size, &enable);
	if (ret < 0)
		return ret;

	ivpu_hw_profiling_freq_drive(vdev, enable);

	ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
	if (ret)
		return ret;

	return size;
}

static const struct file_operations fw_profiling_freq_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = fw_profiling_freq_fops_write,
};
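/*
 * fw_profiling_freq_drive accepts any string kstrtobool() understands
 * ("0"/"1", "y"/"n", "on"/"off"). A write reprograms the profiling frequency
 * and resets the PCI function so the firmware boots with the new setting, e.g.:
 *
 *   echo 1 > /sys/kernel/debug/accel/<n>/fw_profiling_freq_drive
 */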

static ssize_t
fw_trace_destination_mask_fops_write(struct file *file, const char __user *user_buf,
				     size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	struct ivpu_fw_info *fw = vdev->fw;
	u32 trace_destination_mask;
	int ret;

	ret = kstrtou32_from_user(user_buf, size, 0, &trace_destination_mask);
	if (ret < 0)
		return ret;

	fw->trace_destination_mask = trace_destination_mask;

	ivpu_jsm_trace_set_config(vdev, fw->trace_level, trace_destination_mask,
				  fw->trace_hw_component_mask);

	return size;
}

static const struct file_operations fw_trace_destination_mask_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = fw_trace_destination_mask_fops_write,
};

static ssize_t
fw_trace_hw_comp_mask_fops_write(struct file *file, const char __user *user_buf,
				 size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	struct ivpu_fw_info *fw = vdev->fw;
	u64 trace_hw_component_mask;
	int ret;

	ret = kstrtou64_from_user(user_buf, size, 0, &trace_hw_component_mask);
	if (ret < 0)
		return ret;

	fw->trace_hw_component_mask = trace_hw_component_mask;

	ivpu_jsm_trace_set_config(vdev, fw->trace_level, fw->trace_destination_mask,
				  trace_hw_component_mask);

	return size;
}

static const struct file_operations fw_trace_hw_comp_mask_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = fw_trace_hw_comp_mask_fops_write,
};

static ssize_t
fw_trace_level_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	struct ivpu_fw_info *fw = vdev->fw;
	u32 trace_level;
	int ret;

	ret = kstrtou32_from_user(user_buf, size, 0, &trace_level);
	if (ret < 0)
		return ret;

	fw->trace_level = trace_level;

	ivpu_jsm_trace_set_config(vdev, trace_level, fw->trace_destination_mask,
				  fw->trace_hw_component_mask);

	return size;
}

static const struct file_operations fw_trace_level_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = fw_trace_level_fops_write,
};
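/*
 * The three fw_trace_* files above each update one field of the cached trace
 * configuration and push the full triple (level, destination mask, HW
 * component mask) to the firmware via ivpu_jsm_trace_set_config(). Values are
 * parsed with kstrtou32()/kstrtou64() in base 0, so a 0x prefix selects hex.
 * The values below are arbitrary examples:
 *
 *   echo 0x1 > /sys/kernel/debug/accel/<n>/fw_trace_level
 *   echo 0xffffffff > /sys/kernel/debug/accel/<n>/fw_trace_hw_comp_mask
 *
 * The current configuration can be read back from the fw_trace_config file.
 */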

static ssize_t
ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	int ret;

	if (!size)
		return -EINVAL;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	ivpu_pm_trigger_recovery(vdev, "debugfs");
	flush_work(&vdev->pm->recovery_work);
	ivpu_rpm_put(vdev);
	return size;
}

static const struct file_operations ivpu_force_recovery_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = ivpu_force_recovery_fn,
};
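/*
 * Any non-empty write to force_recovery wakes the device, triggers the
 * recovery path and waits for the recovery work to finish before returning:
 *
 *   echo 1 > /sys/kernel/debug/accel/<n>/force_recovery
 */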

static int ivpu_reset_engine_fn(void *data, u64 val)
{
	struct ivpu_device *vdev = (struct ivpu_device *)data;

	return ivpu_jsm_reset_engine(vdev, (u32)val);
}

DEFINE_DEBUGFS_ATTRIBUTE(ivpu_reset_engine_fops, NULL, ivpu_reset_engine_fn, "0x%02llx\n");

static int ivpu_resume_engine_fn(void *data, u64 val)
{
	struct ivpu_device *vdev = (struct ivpu_device *)data;

	return ivpu_jsm_hws_resume_engine(vdev, (u32)val);
}

DEFINE_DEBUGFS_ATTRIBUTE(ivpu_resume_engine_fops, NULL, ivpu_resume_engine_fn, "0x%02llx\n");

static int dct_active_get(void *data, u64 *active_percent)
{
	struct ivpu_device *vdev = data;

	*active_percent = vdev->pm->dct_active_percent;

	return 0;
}

static int dct_active_set(void *data, u64 active_percent)
{
	struct ivpu_device *vdev = data;
	int ret;

	if (active_percent > 100)
		return -EINVAL;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	if (active_percent)
		ret = ivpu_pm_dct_enable(vdev, active_percent);
	else
		ret = ivpu_pm_dct_disable(vdev);

	ivpu_rpm_put(vdev);

	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
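/*
 * The dct attribute takes a duty-cycle-throttling active-time percentage in
 * the 0-100 range. Writing 0 disables throttling, any other value enables it:
 *
 *   echo 30 > /sys/kernel/debug/accel/<n>/dct   # throttle to 30% active time
 *   echo 0  > /sys/kernel/debug/accel/<n>/dct   # disable DCT
 */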

static int priority_bands_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = s->private;
	struct ivpu_hw_info *hw = vdev->hw;

	for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
	     band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
		switch (band) {
		case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
			seq_puts(s, "Idle: ");
			break;

		case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
			seq_puts(s, "Normal: ");
			break;

		case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
			seq_puts(s, "Focus: ");
			break;

		case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
			seq_puts(s, "Realtime: ");
			break;
		}

		seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
			   hw->hws.grace_period[band], hw->hws.process_grace_period[band],
			   hw->hws.process_quantum[band]);
	}

	return 0;
}

static int priority_bands_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, priority_bands_show, inode->i_private);
}

static ssize_t
priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
	struct seq_file *s = file->private_data;
	struct ivpu_device *vdev = s->private;
	char buf[64];
	u32 grace_period;
	u32 process_grace_period;
	u32 process_quantum;
	u32 band;
	int ret;

	if (size >= sizeof(buf))
		return -EINVAL;

	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, pos, user_buf, size);
	if (ret < 0)
		return ret;

	buf[ret] = '\0';
	ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period,
		     &process_quantum);
	if (ret != 4)
		return -EINVAL;

	if (band >= VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT)
		return -EINVAL;

	vdev->hw->hws.grace_period[band] = grace_period;
	vdev->hw->hws.process_grace_period[band] = process_grace_period;
	vdev->hw->hws.process_quantum[band] = process_quantum;

	return size;
}

static const struct file_operations ivpu_hws_priority_bands_fops = {
	.owner = THIS_MODULE,
	.open = priority_bands_fops_open,
	.write = priority_bands_fops_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
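/*
 * hws_priority_bands expects four unsigned decimal values per write:
 * "<band> <grace_period> <process_grace_period> <process_quantum>", where
 * <band> is a VPU_JOB_SCHEDULING_PRIORITY_BAND_* index. The numbers below are
 * arbitrary example values:
 *
 *   echo "1 100000 500000 300000" > /sys/kernel/debug/accel/<n>/hws_priority_bands
 *
 * Reading the file dumps the currently cached values for all bands. The write
 * only updates vdev->hw->hws; the new values are presumably picked up the next
 * time the driver programs the hardware scheduler.
 */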
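/*
 * All entries are created under the device's DRM debugfs directory, which for
 * accel devices typically lives at /sys/kernel/debug/accel/<minor>/ (exact
 * path depends on the kernel configuration). Entries in vdev_debugfs_list are
 * read-only informational files; the remaining knobs are created below with
 * explicit 0200/0644 permissions.
 */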
void ivpu_debugfs_init(struct ivpu_device *vdev)
{
	struct dentry *debugfs_root = vdev->drm.debugfs_root;

	drm_debugfs_add_files(&vdev->drm, vdev_debugfs_list, ARRAY_SIZE(vdev_debugfs_list));

	debugfs_create_file("force_recovery", 0200, debugfs_root, vdev,
			    &ivpu_force_recovery_fops);

	debugfs_create_file("dvfs_mode", 0644, debugfs_root, vdev,
			    &dvfs_mode_fops);

	debugfs_create_file("fw_dyndbg", 0200, debugfs_root, vdev,
			    &fw_dyndbg_fops);
	debugfs_create_file("fw_log", 0644, debugfs_root, vdev,
			    &fw_log_fops);
	debugfs_create_file("fw_trace_destination_mask", 0200, debugfs_root, vdev,
			    &fw_trace_destination_mask_fops);
	debugfs_create_file("fw_trace_hw_comp_mask", 0200, debugfs_root, vdev,
			    &fw_trace_hw_comp_mask_fops);
	debugfs_create_file("fw_trace_level", 0200, debugfs_root, vdev,
			    &fw_trace_level_fops);
	debugfs_create_file("hws_priority_bands", 0200, debugfs_root, vdev,
			    &ivpu_hws_priority_bands_fops);

	debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
			    &ivpu_reset_engine_fops);
	debugfs_create_file("resume_engine", 0200, debugfs_root, vdev,
			    &ivpu_resume_engine_fops);

	if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX) {
		debugfs_create_file("fw_profiling_freq_drive", 0200,
				    debugfs_root, vdev, &fw_profiling_freq_fops);
		debugfs_create_file("dct", 0644, debugfs_root, vdev, &ivpu_dct_fops);
	}

#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_hw", debugfs_root, &ivpu_hw_failure);
#endif
}