1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2023-2024 Intel Corporation
4 */
5
6 #include <linux/debugfs.h>
7
8 #include <drm/drm_print.h>
9 #include <drm/drm_debugfs.h>
10
11 #include "xe_debugfs.h"
12 #include "xe_device.h"
13 #include "xe_gt.h"
14 #include "xe_gt_debugfs.h"
15 #include "xe_gt_sriov_pf_config.h"
16 #include "xe_gt_sriov_pf_control.h"
17 #include "xe_gt_sriov_pf_debugfs.h"
18 #include "xe_gt_sriov_pf_helpers.h"
19 #include "xe_gt_sriov_pf_migration.h"
20 #include "xe_gt_sriov_pf_monitor.h"
21 #include "xe_gt_sriov_pf_policy.h"
22 #include "xe_gt_sriov_pf_service.h"
23 #include "xe_guc.h"
24 #include "xe_pm.h"
25 #include "xe_sriov_pf.h"
26 #include "xe_sriov_pf_provision.h"
27
28 /*
29 * /sys/kernel/debug/dri/BDF/
30 * ├── sriov # d_inode->i_private = (xe_device*)
31 * │ ├── pf # d_inode->i_private = (xe_device*)
32 * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
33 * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*)
34 * │ │ │ ├── gt1 # d_inode->i_private = (xe_gt*)
35 * │ │ ├── tile1
36 * │ │ │ :
37 * │ ├── vf1 # d_inode->i_private = VFID(1)
38 * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
39 * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*)
40 * │ │ │ ├── gt1 # d_inode->i_private = (xe_gt*)
41 * │ │ ├── tile1
42 * │ │ │ :
43 * : :
44 * │ ├── vfN # d_inode->i_private = VFID(N)
45 */
46
extract_priv(struct dentry * d)47 static void *extract_priv(struct dentry *d)
48 {
49 return d->d_inode->i_private;
50 }
51
/* The gt directory's i_private holds the xe_gt pointer (see tree above). */
static struct xe_gt *extract_gt(struct dentry *d)
{
	struct xe_gt *gt = extract_priv(d);

	return gt;
}
56
extract_xe(struct dentry * d)57 static struct xe_device *extract_xe(struct dentry *d)
58 {
59 return extract_priv(d->d_parent->d_parent->d_parent);
60 }
61
extract_vfid(struct dentry * d)62 static unsigned int extract_vfid(struct dentry *d)
63 {
64 void *priv = extract_priv(d->d_parent->d_parent);
65
66 return priv == extract_xe(d) ? PFID : (uintptr_t)priv;
67 }
68
69 /*
70 * /sys/kernel/debug/dri/BDF/
71 * ├── sriov
72 * : ├── pf
73 * : ├── tile0
74 * : ├── gt0
75 * : ├── contexts_provisioned
76 * ├── doorbells_provisioned
77 * ├── runtime_registers
78 * ├── adverse_events
79 */
80
/* read-only PF info files, each rendered through xe_gt_debugfs_simple_show() */
static const struct drm_info_list pf_info[] = {
	{
		"contexts_provisioned",
		.show = xe_gt_debugfs_simple_show,
		.data = xe_gt_sriov_pf_config_print_ctxs,
	},
	{
		"doorbells_provisioned",
		.show = xe_gt_debugfs_simple_show,
		.data = xe_gt_sriov_pf_config_print_dbs,
	},
	{
		"runtime_registers",
		.show = xe_gt_debugfs_simple_show,
		.data = xe_gt_sriov_pf_service_print_runtime,
	},
	{
		"adverse_events",
		.show = xe_gt_debugfs_simple_show,
		.data = xe_gt_sriov_pf_monitor_print_events,
	},
};
103
104 /*
105 * /sys/kernel/debug/dri/BDF/
106 * ├── sriov
107 * : ├── pf
108 * : ├── tile0
109 * : ├── gt0
 * :       :       :   ├── reset_engine
 * :       :       :   ├── sample_period_ms
 * :       :       :   ├── sched_if_idle
113 */
114
/*
 * DEFINE_SRIOV_GT_POLICY_DEBUGFS_ATTRIBUTE - Generate <POLICY>_fops for a
 * GT-level policy debugfs file.
 * @POLICY: name matching the xe_gt_sriov_pf_policy_{set,get}_<POLICY>() helpers
 * @TYPE: native type of the policy value (used only for overflow rejection)
 * @FORMAT: printf-style format used when reading the value back
 *
 * The file's private data is the dentry of the owning gt directory, so the
 * generated accessors recover the gt via extract_gt(). A successful write
 * also switches the device into the "custom" provisioning mode.
 */
#define DEFINE_SRIOV_GT_POLICY_DEBUGFS_ATTRIBUTE(POLICY, TYPE, FORMAT) \
\
static int POLICY##_set(void *data, u64 val) \
{ \
	struct xe_gt *gt = extract_gt(data); \
	struct xe_device *xe = gt_to_xe(gt); \
	int err; \
\
	if (val > (TYPE)~0ull) \
		return -EOVERFLOW; \
\
	guard(xe_pm_runtime)(xe); \
	err = xe_gt_sriov_pf_policy_set_##POLICY(gt, val); \
	if (!err) \
		xe_sriov_pf_provision_set_custom_mode(xe); \
\
	return err; \
} \
\
static int POLICY##_get(void *data, u64 *val) \
{ \
	struct xe_gt *gt = extract_gt(data); \
\
	*val = xe_gt_sriov_pf_policy_get_##POLICY(gt); \
	return 0; \
} \
\
DEFINE_DEBUGFS_ATTRIBUTE(POLICY##_fops, POLICY##_get, POLICY##_set, FORMAT)

DEFINE_SRIOV_GT_POLICY_DEBUGFS_ATTRIBUTE(reset_engine, bool, "%llu\n");
DEFINE_SRIOV_GT_POLICY_DEBUGFS_ATTRIBUTE(sched_if_idle, bool, "%llu\n");
DEFINE_SRIOV_GT_POLICY_DEBUGFS_ATTRIBUTE(sample_period, u32, "%llu\n");
147
/* Register the PF-only GT policy knobs under the PF's gt directory. */
static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent)
{
	/* policy files exist only for the PF, never under a vfN directory */
	xe_gt_assert(gt, gt == extract_gt(parent));
	xe_gt_assert(gt, PFID == extract_vfid(parent));

	debugfs_create_file_unsafe("reset_engine", 0644, parent, parent, &reset_engine_fops);
	debugfs_create_file_unsafe("sched_if_idle", 0644, parent, parent, &sched_if_idle_fops);
	debugfs_create_file_unsafe("sample_period_ms", 0644, parent, parent, &sample_period_fops);
}
157
158 /*
159 * /sys/kernel/debug/dri/BDF/
160 * ├── sriov
161 * : ├── pf
162 * : ├── tile0
163 * : ├── gt0
164 * : ├── sched_groups_mode
 * :       :       :   ├── sched_groups_exec_quantums_ms
 * :       :       :   ├── sched_groups_preempt_timeouts_us
167 * ├── sched_groups
168 * : ├── group0
169 * :
170 * : └── groupN
171 * ├── vf1
172 * : ├── tile0
173 * : ├── gt0
 * :       :       :   ├── sched_groups_exec_quantums_ms
 * :       :       :   ├── sched_groups_preempt_timeouts_us
176 * :
177 */
178
/* Map a scheduling-group mode to its user-visible debugfs name. */
static const char *sched_group_mode_to_string(enum xe_sriov_sched_group_modes mode)
{
	switch (mode) {
	case XE_SRIOV_SCHED_GROUPS_DISABLED:
		return "disabled";
	case XE_SRIOV_SCHED_GROUPS_MEDIA_SLICES:
		return "media_slices";
	case XE_SRIOV_SCHED_GROUPS_MODES_COUNT:
		/* dummy mode to make the compiler happy */
		break;
	}

	return "unknown";
}
193
/*
 * Print every scheduling-group mode supported by this GT on one line, with
 * the currently active mode enclosed in square brackets.
 */
static int sched_groups_info(struct seq_file *m, void *data)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct xe_gt *gt = extract_gt(m->private);
	enum xe_sriov_sched_group_modes current_mode =
		gt->sriov.pf.policy.guc.sched_groups.current_mode;
	enum xe_sriov_sched_group_modes mode;

	for (mode = XE_SRIOV_SCHED_GROUPS_DISABLED;
	     mode < XE_SRIOV_SCHED_GROUPS_MODES_COUNT;
	     mode++) {
		if (!xe_sriov_gt_pf_policy_has_sched_group_mode(gt, mode))
			continue;

		/* space separator before every entry except DISABLED itself */
		drm_printf(&p, "%s%s%s%s",
			   mode == XE_SRIOV_SCHED_GROUPS_DISABLED ? "" : " ",
			   mode == current_mode ? "[" : "",
			   sched_group_mode_to_string(mode),
			   mode == current_mode ? "]" : "");
	}

	drm_puts(&p, "\n");

	return 0;
}
219
/* Forward the gt directory dentry (i_private) to sched_groups_info(). */
static int sched_groups_open(struct inode *inode, struct file *file)
{
	return single_open(file, sched_groups_info, inode->i_private);
}
224
/*
 * Select the active scheduling-group mode by writing its name (as produced
 * by sched_group_mode_to_string()); a trailing newline is tolerated via
 * sysfs_streq().
 */
static ssize_t sched_groups_write(struct file *file, const char __user *ubuf,
				  size_t size, loff_t *pos)
{
	struct xe_gt *gt = extract_gt(file_inode(file)->i_private);
	enum xe_sriov_sched_group_modes mode;
	char name[32];
	int ret;

	/* only complete writes at offset 0 are accepted */
	if (*pos)
		return -ESPIPE;

	if (!size)
		return -ENODATA;

	if (size > sizeof(name) - 1)
		return -EINVAL;

	ret = simple_write_to_buffer(name, sizeof(name) - 1, pos, ubuf, size);
	if (ret < 0)
		return ret;
	name[ret] = '\0';

	/* find the mode whose name matches the user input */
	for (mode = XE_SRIOV_SCHED_GROUPS_DISABLED;
	     mode < XE_SRIOV_SCHED_GROUPS_MODES_COUNT;
	     mode++)
		if (sysfs_streq(name, sched_group_mode_to_string(mode)))
			break;

	if (mode == XE_SRIOV_SCHED_GROUPS_MODES_COUNT)
		return -EINVAL;

	guard(xe_pm_runtime)(gt_to_xe(gt));
	ret = xe_gt_sriov_pf_policy_set_sched_groups_mode(gt, mode);

	return ret < 0 ? ret : size;
}
261
/* fops for "sched_groups_mode": read lists the modes, write selects one */
static const struct file_operations sched_groups_fops = {
	.owner = THIS_MODULE,
	.open = sched_groups_open,
	.read = seq_read,
	.write = sched_groups_write,
	.llseek = seq_lseek,
	.release = single_release,
};
270
/*
 * Print the per-group u32 values fetched by @get as a single
 * comma-separated line.
 */
static int sched_groups_config_show(struct seq_file *m, void *data,
				    void (*get)(struct xe_gt *, unsigned int, u32 *, u32))
{
	struct drm_printer p = drm_seq_file_printer(m);
	unsigned int vfid = extract_vfid(m->private);
	struct xe_gt *gt = extract_gt(m->private);
	u32 values[GUC_MAX_SCHED_GROUPS];
	u8 idx;

	get(gt, vfid, values, ARRAY_SIZE(values));

	/* comma before every value except the first one */
	for (idx = 0; idx < ARRAY_SIZE(values); idx++)
		drm_printf(&p, "%s%u", idx ? "," : "", values[idx]);

	drm_puts(&p, "\n");

	return 0;
}
293
sched_groups_config_write(struct file * file,const char __user * ubuf,size_t size,loff_t * pos,int (* set)(struct xe_gt *,unsigned int,u32 *,u32))294 static ssize_t sched_groups_config_write(struct file *file, const char __user *ubuf,
295 size_t size, loff_t *pos,
296 int (*set)(struct xe_gt *, unsigned int, u32 *, u32))
297 {
298 struct dentry *parent = file_inode(file)->i_private;
299 unsigned int vfid = extract_vfid(parent);
300 struct xe_gt *gt = extract_gt(parent);
301 u32 values[GUC_MAX_SCHED_GROUPS];
302 int *input __free(kfree) = NULL;
303 u32 count;
304 int ret;
305 int i;
306
307 if (*pos)
308 return -ESPIPE;
309
310 if (!size)
311 return -ENODATA;
312
313 ret = parse_int_array_user(ubuf, min(size, GUC_MAX_SCHED_GROUPS * sizeof(u32)), &input);
314 if (ret)
315 return ret;
316
317 count = input[0];
318 if (count > GUC_MAX_SCHED_GROUPS)
319 return -E2BIG;
320
321 for (i = 0; i < count; i++) {
322 if (input[i + 1] < 0 || input[i + 1] > S32_MAX)
323 return -EINVAL;
324
325 values[i] = input[i + 1];
326 }
327
328 guard(xe_pm_runtime)(gt_to_xe(gt));
329 ret = set(gt, vfid, values, count);
330
331 return ret < 0 ? ret : size;
332 }
333
/*
 * DEFINE_SRIOV_GT_GRP_CFG_DEBUGFS_ATTRIBUTE - Generate seq_file based
 * sched_groups_<CONFIG>_fops that read and write a per-group u32 array via
 * xe_gt_sriov_pf_config_{get,set}_groups_<CONFIG>() using the common
 * sched_groups_config_{show,write}() helpers.
 */
#define DEFINE_SRIOV_GT_GRP_CFG_DEBUGFS_ATTRIBUTE(CONFIG) \
static int sched_groups_##CONFIG##_show(struct seq_file *m, void *data) \
{ \
	return sched_groups_config_show(m, data, \
					xe_gt_sriov_pf_config_get_groups_##CONFIG); \
} \
\
static int sched_groups_##CONFIG##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, sched_groups_##CONFIG##_show, \
			   inode->i_private); \
} \
\
static ssize_t sched_groups_##CONFIG##_write(struct file *file, \
					     const char __user *ubuf, \
					     size_t size, loff_t *pos) \
{ \
	return sched_groups_config_write(file, ubuf, size, pos, \
					 xe_gt_sriov_pf_config_set_groups_##CONFIG); \
} \
\
static const struct file_operations sched_groups_##CONFIG##_fops = { \
	.owner = THIS_MODULE, \
	.open = sched_groups_##CONFIG##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.write = sched_groups_##CONFIG##_write, \
	.release = single_release, \
}

DEFINE_SRIOV_GT_GRP_CFG_DEBUGFS_ATTRIBUTE(exec_quantums);
DEFINE_SRIOV_GT_GRP_CFG_DEBUGFS_ATTRIBUTE(preempt_timeouts);
366
/*
 * Show the engines that belong to one scheduling group as a space separated
 * list of engine names; an out-of-range group index for the current mode
 * yields an empty read.
 */
static ssize_t sched_group_engines_read(struct file *file, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct dentry *dent = file_dentry(file);
	/* groupN files live under .../gtM/sched_groups/, hence two levels up */
	struct xe_gt *gt = extract_gt(dent->d_parent->d_parent);
	struct xe_gt_sriov_scheduler_groups *info = &gt->sriov.pf.policy.guc.sched_groups;
	struct guc_sched_group *groups = info->modes[info->current_mode].groups;
	u32 num_groups = info->modes[info->current_mode].num_groups;
	/* the group index was stored directly in i_private at creation time */
	unsigned int group = (uintptr_t)extract_priv(dent);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	char engines[128];

	engines[0] = '\0';

	if (group < num_groups) {
		for_each_hw_engine(hwe, gt, id) {
			u8 guc_class = xe_engine_class_to_guc_class(hwe->class);
			u32 mask = groups[group].engines[guc_class];

			/* per-class bitmask selects engines by logical instance */
			if (mask & BIT(hwe->logical_instance)) {
				strlcat(engines, hwe->name, sizeof(engines));
				strlcat(engines, " ", sizeof(engines));
			}
		}
		strlcat(engines, "\n", sizeof(engines));
	}

	return simple_read_from_buffer(buf, count, ppos, engines, strlen(engines));
}
397
/* read-only fops for the per-group "groupN" engine listing files */
static const struct file_operations sched_group_engines_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sched_group_engines_read,
	.llseek = default_llseek,
};
404
/*
 * Register scheduling-group files: the quantum/timeout arrays exist for
 * both PF and VFs, while mode selection and the per-group engine listings
 * are PF-only.
 */
static void pf_add_sched_groups(struct xe_gt *gt, struct dentry *parent, unsigned int vfid)
{
	struct dentry *groups;
	u8 group;

	xe_gt_assert(gt, gt == extract_gt(parent));
	xe_gt_assert(gt, vfid == extract_vfid(parent));

	/*
	 * TODO: we currently call this function before we initialize scheduler
	 * groups, so at this point in time we don't know if there are any
	 * valid groups on the GT and we can't selectively register the debugfs
	 * only if there are any. Therefore, we always register the debugfs
	 * files if we're on a platform that has support for groups.
	 * We should rework the flow so that debugfs is registered after the
	 * policy init, so that we check if there are valid groups before
	 * adding the debugfs files.
	 * Similarly, instead of using GUC_MAX_SCHED_GROUPS we could use
	 * gt->sriov.pf.policy.guc.sched_groups.max_number_of_groups.
	 */
	if (!xe_sriov_gt_pf_policy_has_sched_groups_support(gt))
		return;

	debugfs_create_file("sched_groups_exec_quantums_ms", 0644, parent, parent,
			    &sched_groups_exec_quantums_fops);
	debugfs_create_file("sched_groups_preempt_timeouts_us", 0644, parent, parent,
			    &sched_groups_preempt_timeouts_fops);

	/* everything below is PF-only */
	if (vfid != PFID)
		return;

	debugfs_create_file("sched_groups_mode", 0644, parent, parent, &sched_groups_fops);

	groups = debugfs_create_dir("sched_groups", parent);
	if (IS_ERR(groups))
		return;

	for (group = 0; group < GUC_MAX_SCHED_GROUPS; group++) {
		char name[10]; /* "group255\0" worst case (group is u8) fits */

		snprintf(name, sizeof(name), "group%u", group);
		/* i_private carries the group index, see sched_group_engines_read() */
		debugfs_create_file(name, 0644, groups, (void *)(uintptr_t)group,
				    &sched_group_engines_fops);
	}
}
450
451 /*
452 * /sys/kernel/debug/dri/BDF/
453 * ├── sriov
454 * : ├── pf
455 * │ ├── tile0
456 * │ : ├── gt0
457 * │ : ├── doorbells_spare
458 * │ ├── contexts_spare
459 * │ ├── exec_quantum_ms
460 * │ ├── preempt_timeout_us
461 * │ ├── sched_priority
462 * ├── vf1
463 * : ├── tile0
464 * : ├── gt0
465 * : ├── doorbells_quota
466 * ├── contexts_quota
467 * ├── exec_quantum_ms
468 * ├── preempt_timeout_us
469 * ├── sched_priority
470 */
471
/*
 * DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE - Generate <CONFIG>_fops for a
 * per-function (PF spare or VF quota) provisioning debugfs file.
 * @CONFIG: name matching the xe_gt_sriov_pf_config_{set,get}_<CONFIG>() helpers
 * @TYPE: native type of the value (used only for overflow rejection)
 * @FORMAT: printf-style format used when reading the value back
 *
 * Unlike the policy attributes, a write first waits for the PF to become
 * ready and targets the function identified by the owning directory; a
 * successful write switches the device into "custom" provisioning mode.
 */
#define DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(CONFIG, TYPE, FORMAT) \
\
static int CONFIG##_set(void *data, u64 val) \
{ \
	struct xe_gt *gt = extract_gt(data); \
	unsigned int vfid = extract_vfid(data); \
	struct xe_device *xe = gt_to_xe(gt); \
	int err; \
\
	if (val > (TYPE)~0ull) \
		return -EOVERFLOW; \
\
	guard(xe_pm_runtime)(xe); \
	err = xe_sriov_pf_wait_ready(xe) ?: \
	      xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
	if (!err) \
		xe_sriov_pf_provision_set_custom_mode(xe); \
\
	return err; \
} \
\
static int CONFIG##_get(void *data, u64 *val) \
{ \
	struct xe_gt *gt = extract_gt(data); \
	unsigned int vfid = extract_vfid(data); \
\
	*val = xe_gt_sriov_pf_config_get_##CONFIG(gt, vfid); \
	return 0; \
} \
\
DEFINE_DEBUGFS_ATTRIBUTE(CONFIG##_fops, CONFIG##_get, CONFIG##_set, FORMAT)

DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ctxs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(dbs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(exec_quantum, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(preempt_timeout, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(sched_priority, u32, "%llu\n");
509
510 /*
511 * /sys/kernel/debug/dri/BDF/
512 * ├── sriov
513 * : ├── pf
514 * │ ├── tile0
515 * │ : ├── gt0
516 * │ : ├── threshold_cat_error_count
517 * │ ├── threshold_doorbell_time_us
518 * │ ├── threshold_engine_reset_count
519 * │ ├── threshold_guc_time_us
520 * │ ├── threshold_irq_time_us
521 * │ ├── threshold_page_fault_count
522 * ├── vf1
523 * : ├── tile0
524 * : ├── gt0
525 * : ├── threshold_cat_error_count
526 * ├── threshold_doorbell_time_us
527 * ├── threshold_engine_reset_count
528 * ├── threshold_guc_time_us
529 * ├── threshold_irq_time_us
530 * ├── threshold_page_fault_count
531 */
532
/*
 * Common setter for all threshold_* files: program the threshold KLV for
 * the targeted function and switch provisioning to custom mode on success.
 */
static int set_threshold(void *data, u64 val, enum xe_guc_klv_threshold_index index)
{
	struct xe_gt *gt = extract_gt(data);
	unsigned int vfid = extract_vfid(data);
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	/* threshold values are limited to 32 bits */
	if (val > (u32)~0ull)
		return -EOVERFLOW;

	guard(xe_pm_runtime)(xe);
	err = xe_gt_sriov_pf_config_set_threshold(gt, vfid, index, val);
	if (!err)
		xe_sriov_pf_provision_set_custom_mode(xe);

	return err;
}
550
/* Common getter for all threshold_* files. */
static int get_threshold(void *data, u64 *val, enum xe_guc_klv_threshold_index index)
{
	struct xe_gt *gt = extract_gt(data);
	unsigned int vfid = extract_vfid(data);

	*val = xe_gt_sriov_pf_config_get_threshold(gt, vfid, index);
	return 0;
}
559
/*
 * DEFINE_SRIOV_GT_THRESHOLD_DEBUGFS_ATTRIBUTE - Generate <THRESHOLD>_fops
 * that bind the common set_threshold()/get_threshold() helpers to a fixed
 * KLV @INDEX.
 */
#define DEFINE_SRIOV_GT_THRESHOLD_DEBUGFS_ATTRIBUTE(THRESHOLD, INDEX) \
\
static int THRESHOLD##_set(void *data, u64 val) \
{ \
	return set_threshold(data, val, INDEX); \
} \
\
static int THRESHOLD##_get(void *data, u64 *val) \
{ \
	return get_threshold(data, val, INDEX); \
} \
\
DEFINE_DEBUGFS_ATTRIBUTE(THRESHOLD##_fops, THRESHOLD##_get, THRESHOLD##_set, "%llu\n")

/* generate all threshold attributes */
#define define_threshold_attribute(TAG, NAME, ...) \
	DEFINE_SRIOV_GT_THRESHOLD_DEBUGFS_ATTRIBUTE(NAME, MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG));
MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_attribute)
#undef define_threshold_attribute
579
/*
 * Register per-function config files; the "spare" vs "quota" naming
 * depends on whether this directory represents the PF or a VF.
 */
static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigned int vfid)
{
	xe_gt_assert(gt, gt == extract_gt(parent));
	xe_gt_assert(gt, vfid == extract_vfid(parent));

	debugfs_create_file_unsafe(vfid ? "doorbells_quota" : "doorbells_spare",
				   0644, parent, parent, &dbs_fops);
	debugfs_create_file_unsafe(vfid ? "contexts_quota" : "contexts_spare",
				   0644, parent, parent, &ctxs_fops);
	debugfs_create_file_unsafe("exec_quantum_ms", 0644, parent, parent,
				   &exec_quantum_fops);
	debugfs_create_file_unsafe("preempt_timeout_us", 0644, parent, parent,
				   &preempt_timeout_fops);
	debugfs_create_file_unsafe("sched_priority", 0644, parent, parent,
				   &sched_priority_fops);

	/*
	 * register all threshold attributes, skipping those whose optional
	 * minimum GuC firmware version requirement is not met
	 */
#define register_threshold_attribute(TAG, NAME, VER...) ({ \
	if (IF_ARGS(GUC_FIRMWARE_VER_AT_LEAST(&gt->uc.guc, VER), true, VER)) \
		debugfs_create_file_unsafe("threshold_" #NAME, 0644, parent, parent, \
					   &NAME##_fops); \
	});
	MAKE_XE_GUC_KLV_THRESHOLDS_SET(register_threshold_attribute)
#undef register_threshold_attribute
}
605
606 /*
607 * /sys/kernel/debug/dri/BDF/
608 * ├── sriov
609 * : ├── vf1
610 * : ├── tile0
611 * : ├── gt0
612 * : ├── control { stop, pause, resume }
613 */
614
/* commands accepted by the VF "control" file, matched with sysfs_streq() */
static const struct {
	const char *cmd;
	int (*fn)(struct xe_gt *gt, unsigned int vfid);
} control_cmds[] = {
	{ "stop", xe_gt_sriov_pf_control_stop_vf },
	{ "pause", xe_gt_sriov_pf_control_pause_vf },
	{ "resume", xe_gt_sriov_pf_control_resume_vf },
};
623
control_write(struct file * file,const char __user * buf,size_t count,loff_t * pos)624 static ssize_t control_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
625 {
626 struct dentry *dent = file_dentry(file);
627 struct dentry *parent = dent->d_parent;
628 struct xe_gt *gt = extract_gt(parent);
629 struct xe_device *xe = gt_to_xe(gt);
630 unsigned int vfid = extract_vfid(parent);
631 int ret = -EINVAL;
632 char cmd[32];
633 size_t n;
634
635 xe_gt_assert(gt, vfid);
636 xe_gt_sriov_pf_assert_vfid(gt, vfid);
637
638 if (*pos)
639 return -ESPIPE;
640
641 if (count > sizeof(cmd) - 1)
642 return -EINVAL;
643
644 ret = simple_write_to_buffer(cmd, sizeof(cmd) - 1, pos, buf, count);
645 if (ret < 0)
646 return ret;
647 cmd[ret] = '\0';
648
649 for (n = 0; n < ARRAY_SIZE(control_cmds); n++) {
650 xe_gt_assert(gt, sizeof(cmd) > strlen(control_cmds[n].cmd));
651
652 if (sysfs_streq(cmd, control_cmds[n].cmd)) {
653 guard(xe_pm_runtime)(xe);
654 ret = control_cmds[n].fn ? (*control_cmds[n].fn)(gt, vfid) : 0;
655 break;
656 }
657 }
658
659 return (ret < 0) ? ret : count;
660 }
661
control_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)662 static ssize_t control_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
663 {
664 char help[128];
665 size_t n;
666
667 help[0] = '\0';
668 for (n = 0; n < ARRAY_SIZE(control_cmds); n++) {
669 strlcat(help, control_cmds[n].cmd, sizeof(help));
670 strlcat(help, "\n", sizeof(help));
671 }
672
673 return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
674 }
675
/* fops for the VF "control" file: read shows help, write runs a command */
static const struct file_operations control_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = control_write,
	.read = control_read,
	.llseek = default_llseek,
};
683
684 /*
685 * /sys/kernel/debug/dri/BDF/
686 * ├── sriov
687 * : ├── vf1
688 * : ├── tile0
689 * : ├── gt0
690 * : ├── config_blob
691 */
692
/* VF config snapshot kept for the lifetime of an open "config_blob" file */
struct config_blob_data {
	size_t size;	/* number of valid bytes in blob[] */
	u8 blob[];	/* saved configuration data */
};
697
/*
 * Open handler for "config_blob": snapshot the VF configuration once, so
 * that subsequent reads observe a consistent blob.
 */
static int config_blob_open(struct inode *inode, struct file *file)
{
	struct dentry *dent = file_dentry(file);
	struct dentry *parent = dent->d_parent;
	struct xe_gt *gt = extract_gt(parent);
	unsigned int vfid = extract_vfid(parent);
	struct config_blob_data *cbd;
	ssize_t ret;

	/* a NULL buffer queries the required snapshot size */
	ret = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
	if (!ret)
		return -ENODATA;
	if (ret < 0)
		return ret;

	cbd = kzalloc_flex(*cbd, blob, ret);
	if (!cbd)
		return -ENOMEM;

	/*
	 * NOTE(review): if the config grows between the size query above and
	 * this save, the helper is presumably expected to return an error
	 * rather than truncate — confirm against its contract.
	 */
	ret = xe_gt_sriov_pf_config_save(gt, vfid, cbd->blob, ret);
	if (ret < 0) {
		kfree(cbd);
		return ret;
	}

	cbd->size = ret;
	file->private_data = cbd;
	return nonseekable_open(inode, file);
}
727
/* Reads serve the snapshot taken at open time, see config_blob_open(). */
static ssize_t config_blob_read(struct file *file, char __user *buf,
				size_t count, loff_t *pos)
{
	struct config_blob_data *cbd = file->private_data;

	return simple_read_from_buffer(buf, count, pos, cbd->blob, cbd->size);
}
735
config_blob_write(struct file * file,const char __user * buf,size_t count,loff_t * pos)736 static ssize_t config_blob_write(struct file *file, const char __user *buf,
737 size_t count, loff_t *pos)
738 {
739 struct dentry *dent = file_dentry(file);
740 struct dentry *parent = dent->d_parent;
741 struct xe_gt *gt = extract_gt(parent);
742 unsigned int vfid = extract_vfid(parent);
743 ssize_t ret;
744 void *tmp;
745
746 if (*pos)
747 return -EINVAL;
748
749 if (!count)
750 return -ENODATA;
751
752 if (count > SZ_4K)
753 return -EINVAL;
754
755 tmp = kzalloc(count, GFP_KERNEL);
756 if (!tmp)
757 return -ENOMEM;
758
759 if (copy_from_user(tmp, buf, count)) {
760 ret = -EFAULT;
761 } else {
762 ret = xe_gt_sriov_pf_config_restore(gt, vfid, tmp, count);
763 if (!ret)
764 ret = count;
765 }
766 kfree(tmp);
767 return ret;
768 }
769
/* Release handler: free the snapshot allocated in config_blob_open(). */
static int config_blob_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
775
/* fops for "config_blob": read saves, write restores a VF configuration */
static const struct file_operations config_blob_ops = {
	.owner = THIS_MODULE,
	.open = config_blob_open,
	.read = config_blob_read,
	.write = config_blob_write,
	.release = config_blob_release,
};
783
/*
 * Create backward-compatibility symlinks (old "ggtt_*"/"lmem_*" names)
 * pointing at the current attribute files one level up.
 */
static void pf_add_compat_attrs(struct xe_gt *gt, struct dentry *dent, unsigned int vfid)
{
	struct xe_device *xe = gt_to_xe(gt);

	/* these attributes only exist for the main (non-media) GT */
	if (!xe_gt_is_main_type(gt))
		return;

	if (vfid) {
		debugfs_create_symlink("ggtt_quota", dent, "../ggtt_quota");
		if (xe_device_has_lmtt(xe))
			debugfs_create_symlink("lmem_quota", dent, "../vram_quota");
	} else {
		debugfs_create_symlink("ggtt_spare", dent, "../ggtt_spare");
		debugfs_create_symlink("ggtt_available", dent, "../ggtt_available");
		debugfs_create_symlink("ggtt_provisioned", dent, "../ggtt_provisioned");
		if (xe_device_has_lmtt(xe)) {
			debugfs_create_symlink("lmem_spare", dent, "../vram_spare");
			debugfs_create_symlink("lmem_provisioned", dent, "../vram_provisioned");
		}
	}
}
805
/* Populate a gt directory with either VF-specific or PF-specific files. */
static void pf_populate_gt(struct xe_gt *gt, struct dentry *dent, unsigned int vfid)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct drm_minor *minor = xe->drm.primary;

	if (vfid) {
		pf_add_config_attrs(gt, dent, vfid);
		pf_add_sched_groups(gt, dent, vfid);

		debugfs_create_file("control", 0600, dent, NULL, &control_ops);

		/* for testing/debugging purposes only! */
		if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
			debugfs_create_file("config_blob",
					    IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400,
					    dent, NULL, &config_blob_ops);
		}

	} else {
		pf_add_config_attrs(gt, dent, PFID);
		pf_add_policy_attrs(gt, dent);
		pf_add_sched_groups(gt, dent, PFID);

		drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), dent, minor);
	}

	/* for backward compatibility only */
	pf_add_compat_attrs(gt, dent, vfid);
}
835
836 /**
837 * xe_gt_sriov_pf_debugfs_populate() - Create SR-IOV GT-level debugfs directories and files.
838 * @gt: the &xe_gt to register
839 * @parent: the parent &dentry that represents a &xe_tile
840 * @vfid: the VF identifier
841 *
842 * Add to the @parent directory new debugfs directory that will represent a @gt and
843 * populate it with GT files that are related to the SR-IOV @vfid function.
844 *
845 * This function can only be called on PF.
846 */
void xe_gt_sriov_pf_debugfs_populate(struct xe_gt *gt, struct dentry *parent, unsigned int vfid)
{
	struct dentry *dent;
	char name[8]; /* should be enough up to "gt%u\0" for 2^8 - 1 */

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, extract_priv(parent) == gt->tile);
	/* parent's parent is either the "pf" dir (holds xe) or a "vfN" dir (holds VFID) */
	xe_gt_assert(gt, extract_priv(parent->d_parent) == gt_to_xe(gt) ||
		     (uintptr_t)extract_priv(parent->d_parent) == vfid);

	/*
	 * /sys/kernel/debug/dri/BDF/
	 * ├── sriov
	 * │   ├── pf
	 * │   │   ├── tile0		# parent
	 * │   │   │   ├── gt0		# d_inode->i_private = (xe_gt*)
	 * │   │   │   ├── gt1
	 * │   │   :   :
	 * │   ├── vf1
	 * │   │   ├── tile0		# parent
	 * │   │   │   ├── gt0		# d_inode->i_private = (xe_gt*)
	 * │   │   │   ├── gt1
	 * │   :   :   :
	 */
	snprintf(name, sizeof(name), "gt%u", gt->info.id);
	dent = debugfs_create_dir(name, parent);
	if (IS_ERR(dent))
		return;
	/* stash the gt pointer so extract_gt()/extract_vfid() can recover it */
	dent->d_inode->i_private = gt;

	xe_gt_assert(gt, extract_gt(dent) == gt);
	xe_gt_assert(gt, extract_vfid(dent) == vfid);

	pf_populate_gt(gt, dent, vfid);
}
882
/*
 * Create "pf" and "vf1".."vfN" symlinks in a GT debugfs directory, each
 * pointing into the dedicated SR-IOV tree for this tile/GT.
 */
static void pf_add_links(struct xe_gt *gt, struct dentry *dent)
{
	unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt);
	unsigned int vfid;
	char name[16]; /* should be more than enough for "vf%u\0" and VFID(UINT_MAX) */
	char symlink[64]; /* should be more enough for "../../sriov/vf%u/tile%u/gt%u\0" */

	/* vfid == 0 denotes the PF itself */
	for (vfid = 0; vfid <= totalvfs; vfid++) {
		if (vfid)
			snprintf(name, sizeof(name), "vf%u", vfid);
		else
			snprintf(name, sizeof(name), "pf");
		snprintf(symlink, sizeof(symlink), "../../sriov/%s/tile%u/gt%u",
			 name, gt->tile->id, gt->info.id);
		debugfs_create_symlink(name, dent, symlink);
	}
}
900
901 /**
902 * xe_gt_sriov_pf_debugfs_register - Register SR-IOV PF specific entries in GT debugfs.
903 * @gt: the &xe_gt to register
904 * @dent: the &dentry that represents the GT directory
905 *
906 * Instead of actual files, create symlinks for PF and each VF to their GT specific
907 * attributes that should be already exposed in the dedicated debugfs SR-IOV tree.
908 */
void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *dent)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	/* the GT directory must already carry the gt pointer in i_private */
	xe_gt_assert(gt, dent->d_inode->i_private == gt);

	pf_add_links(gt, dent);
}
916