// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/configfs.h>
#include <linux/cleanup.h>
#include <linux/find.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>

#include "instructions/xe_mi_commands.h"
#include "xe_configfs.h"
#include "xe_gt_types.h"
#include "xe_hw_engine_types.h"
#include "xe_module.h"
#include "xe_pci_types.h"
#include "xe_sriov_types.h"

/**
 * DOC: Xe Configfs
 *
 * Overview
 * ========
 *
 * Configfs is a filesystem-based manager of kernel objects. Xe KMD registers a
 * configfs subsystem called ``xe`` that creates a directory in the mounted
 * configfs directory. The user can create devices under this directory and
 * configure them as necessary. See Documentation/filesystems/configfs.rst for
 * more information about how configfs works.
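 *
 * If configfs is not already mounted at the conventional location used in the
 * examples below, it can be mounted manually::
 *
 *	# mount -t configfs none /sys/kernel/config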
 *
 * Create devices
 * ==============
 *
 * To create a device, the ``xe`` module must already be loaded, but some
 * attributes can only be set before binding the device. That can be
 * accomplished by blocking the driver autoprobe::
 *
 *	# echo 0 > /sys/bus/pci/drivers_autoprobe
 *	# modprobe xe
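 *
 * Autoprobe can be re-enabled once the device is bound::
 *
 *	# echo 1 > /sys/bus/pci/drivers_autoprobe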
 *
 * In order to create a device, the user has to create a directory inside ``xe``::
 *
 *	# mkdir /sys/kernel/config/xe/0000:03:00.0/
 *
 * Every device created is populated by the driver with entries that can be
 * used to configure it::
 *
 *	/sys/kernel/config/xe/
 *	├── 0000:00:02.0
 *	│   └── ...
 *	├── 0000:00:02.1
 *	│   └── ...
 *	:
 *	└── 0000:03:00.0
 *	    ├── survivability_mode
 *	    ├── gt_types_allowed
 *	    ├── engines_allowed
 *	    └── enable_psmi
 *
 * After configuring the attributes as per the next section, the device can be
 * probed with::
 *
 *	# echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind
 *	# # or
 *	# echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
 *
 * Configure Attributes
 * ====================
 *
 * Survivability mode:
 * -------------------
 *
 * Enable survivability mode on supported cards. This setting only takes
 * effect when probing the device. Example to enable it::
 *
 *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
 *
 * This attribute can only be set before binding to the device.
 *
 * Allowed GT types:
 * -----------------
 *
 * Allow only specific types of GTs to be detected and initialized by the
 * driver. Any combination of GT types can be enabled/disabled, although
 * some settings will cause the device to fail to probe.
 *
 * Writes support both comma- and newline-separated input formats. Reads
 * will always return one GT type per line. "primary" and "media" are the
 * GT type names supported by this interface.
 *
 * This attribute can only be set before binding to the device.
 *
 * Examples:
 *
 * Allow both primary and media GTs to be initialized and used. This matches
 * the driver's default behavior::
 *
 *	# echo 'primary,media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *
 * Allow only the primary GT of each tile to be initialized and used,
 * effectively disabling the media GT if it exists on the platform::
 *
 *	# echo 'primary' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *
 * Allow only the media GT of each tile to be initialized and used,
 * effectively disabling the primary GT. **This configuration will cause
 * device probe failure on all current platforms, but may be allowed on
 * igpu platforms in the future**::
 *
 *	# echo 'media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *
 * Disable all GTs. Only other GPU IP (such as display) is potentially usable.
 * **This configuration will cause device probe failure on all current
 * platforms, but may be allowed on igpu platforms in the future**::
 *
 *	# echo '' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
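 *
 * Reading the attribute back returns one allowed GT type per line, e.g. with
 * the default configuration::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *	primary
 *	media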
 *
 * Allowed engines:
 * ----------------
 *
 * Allow only a set of engines to be available, disabling the other engines
 * even if they are available in hardware. This is applied after HW fuses are
 * considered on each tile. Examples:
 *
 * Allow only one render and one copy engine, nothing else::
 *
 *	# echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *
 * Allow only compute engines and the first copy engine::
 *
 *	# echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *
 * Note that the engine names are the per-GT hardware names. On multi-tile
 * platforms, writing ``rcs0,bcs0`` to this file would allow the first render
 * and copy engines on each tile.
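 *
 * Reads collapse a fully-allowed class into a ``*`` wildcard; after the
 * ``ccs*,bcs0`` example above, the attribute would read back as::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *	bcs0
 *	ccs*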
 *
 * The requested configuration may not be supported by the platform, in which
 * case the driver may fail to probe: for example, if at least one copy engine
 * is expected to be available for migrations, but all of them are disabled.
 * This is intended for debugging purposes only.
 *
 * This attribute can only be set before binding to the device.
 *
 * PSMI
 * ----
 *
 * Enable extra debugging capabilities to trace engine execution. Only useful
 * during early platform enabling and requires additional hardware connected.
 * Once it's enabled, additional workarounds are added and runtime
 * configuration is done via debugfs. Example to enable it::
 *
 *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/enable_psmi
 *
 * This attribute can only be set before binding to the device.
 *
 * Context restore BB
 * ------------------
 *
 * Allow a batch buffer to be executed during context switches. When the
 * GPU is restoring the context, it executes these additional commands. This
 * is useful for testing additional workarounds and validating certain HW
 * behaviors: it's not intended for normal execution and will taint the
 * kernel with TAINT_TEST when used.
 *
 * The syntax allows passing raw instructions to be executed by the engine
 * in a batch buffer or setting specific registers.
 *
 * #. Generic instruction::
 *
 *	<engine-class> cmd <instr> [[dword0] [dword1] [...]]
 *
 * #. Simple register setting::
 *
 *	<engine-class> reg <address> <value>
 *
 * Commands are saved per engine class: all instances of that class will
 * execute those commands during a context switch. The instruction, dword
 * arguments, addresses and values are in hex format, as in the examples
 * below.
 *
 * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f100 after
 *    the normal context restore::
 *
 *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
 *	       > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
 *
 * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f100 at the
 *    beginning of the context restore::
 *
 *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
 *	       > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_mid_bb
 *
 * #. Load certain values in a couple of registers (it can be used as a
 *    simpler alternative to the ``cmd`` action)::
 *
 *	# cat > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb <<EOF
 *	rcs reg 4F100 DEADBEEF
 *	rcs reg 4F104 FFFFFFFF
 *	EOF
 *
 * .. note::
 *
 *    When using multiple lines, make sure to use a command that is
 *    implemented with a single write syscall, like a HEREDOC.
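 *
 *    A quoted ``printf`` typically works as well, as the whole string is
 *    emitted with a single write::
 *
 *	# printf 'rcs reg 4F100 DEADBEEF\nrcs reg 4F104 FFFFFFFF\n' \
 *	       > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb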
 *
 * Currently this is implemented only for the post and mid context restore,
 * and these attributes can only be set before binding to the device.
 *
 * Max SR-IOV Virtual Functions
 * ----------------------------
 *
 * This config allows limiting the number of Virtual Functions (VFs) that can
 * be managed by the Physical Function (PF) driver, where the value 0
 * disables PF mode (no VFs).
 *
 * The default max_vfs config value is taken from the max_vfs modparam.
 *
 * How to enable PF with support for an unlimited (up to the HW limit)
 * number of VFs::
 *
 *	# echo unlimited > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
 *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
 *
 * How to enable PF with support for up to 3 VFs::
 *
 *	# echo 3 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
 *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
 *
 * How to disable PF mode and always run as native::
 *
 *	# echo 0 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
 *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
 *
 * This setting only takes effect when probing the device.
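 *
 * Note that this attribute only caps how many VFs the PF driver may manage:
 * the VFs themselves are still enabled after probe through the standard PCI
 * sysfs interface, e.g.::
 *
 *	# echo 3 > /sys/bus/pci/devices/0000:00:02.0/sriov_numvfs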
 *
 * Remove devices
 * ==============
 *
 * The created device directories can be removed using ``rmdir``::
 *
 *	# rmdir /sys/kernel/config/xe/0000:03:00.0/
 */

/* Similar to struct xe_bb, but not tied to HW (yet) */
struct wa_bb {
	u32 *cs;
	u32 len; /* in dwords */
};

struct xe_config_group_device {
	struct config_group group;
	struct config_group sriov;

	struct xe_config_device {
		u64 gt_types_allowed;
		u64 engines_allowed;
		struct wa_bb ctx_restore_post_bb[XE_ENGINE_CLASS_MAX];
		struct wa_bb ctx_restore_mid_bb[XE_ENGINE_CLASS_MAX];
		bool survivability_mode;
		bool enable_psmi;
		struct {
			unsigned int max_vfs;
		} sriov;
	} config;

	/* protects attributes */
	struct mutex lock;
	/* matching descriptor */
	const struct xe_device_desc *desc;
	/* tentative SR-IOV mode */
	enum xe_sriov_mode mode;
};

static const struct xe_config_device device_defaults = {
	.gt_types_allowed = U64_MAX,
	.engines_allowed = U64_MAX,
	.survivability_mode = false,
	.enable_psmi = false,
	.sriov = {
		.max_vfs = UINT_MAX,
	},
};

static void set_device_defaults(struct xe_config_device *config)
{
	*config = device_defaults;
#ifdef CONFIG_PCI_IOV
	config->sriov.max_vfs = xe_modparam.max_vfs;
#endif
}

struct engine_info {
	const char *cls;
	u64 mask;
	enum xe_engine_class engine_class;
};

/* Helpful macros to aid in sizing buffer allocations when parsing */
#define MAX_ENGINE_CLASS_CHARS 5
#define MAX_ENGINE_INSTANCE_CHARS 2

static const struct engine_info engine_info[] = {
	{ .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK, .engine_class = XE_ENGINE_CLASS_RENDER },
	{ .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK, .engine_class = XE_ENGINE_CLASS_COPY },
	{ .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_DECODE },
	{ .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_ENHANCE },
	{ .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK, .engine_class = XE_ENGINE_CLASS_COMPUTE },
	{ .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK, .engine_class = XE_ENGINE_CLASS_OTHER },
};

static const struct {
	const char *name;
	enum xe_gt_type type;
} gt_types[] = {
	{ .name = "primary", .type = XE_GT_TYPE_MAIN },
	{ .name = "media", .type = XE_GT_TYPE_MEDIA },
};

static struct xe_config_group_device *to_xe_config_group_device(struct config_item *item)
{
	return container_of(to_config_group(item), struct xe_config_group_device, group);
}

static struct xe_config_device *to_xe_config_device(struct config_item *item)
{
	return &to_xe_config_group_device(item)->config;
}

static bool is_bound(struct xe_config_group_device *dev)
{
	unsigned int domain, bus, slot, function;
	struct pci_dev *pdev;
	const char *name;
	bool ret;

	lockdep_assert_held(&dev->lock);

	name = dev->group.cg_item.ci_name;
	if (sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function) != 4)
		return false;

	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
	if (!pdev)
		return false;

	ret = pci_get_drvdata(pdev);
	if (ret)
		pci_dbg(pdev, "Already bound to driver\n");

	pci_dev_put(pdev);
	return ret;
}

static ssize_t survivability_mode_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);

	return sprintf(page, "%d\n", dev->survivability_mode);
}

static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	bool survivability_mode;
	int ret;

	ret = kstrtobool(page, &survivability_mode);
	if (ret)
		return ret;

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.survivability_mode = survivability_mode;

	return len;
}

static ssize_t gt_types_allowed_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);
	char *p = page;

	for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++)
		if (dev->gt_types_allowed & BIT_ULL(gt_types[i].type))
			p += sprintf(p, "%s\n", gt_types[i].name);

	return p - page;
}

static ssize_t gt_types_allowed_store(struct config_item *item, const char *page,
				      size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	char *buf __free(kfree) = kstrdup(page, GFP_KERNEL);
	char *p = buf;
	u64 typemask = 0;

	if (!buf)
		return -ENOMEM;

	while (p) {
		char *typename = strsep(&p, ",\n");
		bool matched = false;

		if (typename[0] == '\0')
			continue;

		for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++) {
			if (strcmp(typename, gt_types[i].name) == 0) {
				/* Use BIT_ULL to match the u64 mask used on reads */
				typemask |= BIT_ULL(gt_types[i].type);
				matched = true;
				break;
			}
		}

		if (!matched)
			return -EINVAL;
	}

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.gt_types_allowed = typemask;

	return len;
}

static ssize_t engines_allowed_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);
	char *p = page;

	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		u64 mask = engine_info[i].mask;

		if ((dev->engines_allowed & mask) == mask) {
			p += sprintf(p, "%s*\n", engine_info[i].cls);
		} else if (mask & dev->engines_allowed) {
			u16 bit0 = __ffs64(mask), bit;

			mask &= dev->engines_allowed;

			for_each_set_bit(bit, (const unsigned long *)&mask, 64)
				p += sprintf(p, "%s%u\n", engine_info[i].cls,
					     bit - bit0);
		}
	}

	return p - page;
}

/*
 * Lookup engine_info. If @mask is not NULL, reduce the mask according to the
 * instance in @pattern.
 *
 * Examples of inputs:
 * - lookup_engine_info("rcs0", &mask): return "rcs" entry from @engine_info
 *   and mask == BIT_ULL(XE_HW_ENGINE_RCS0)
 * - lookup_engine_info("rcs*", &mask): return "rcs" entry from @engine_info
 *   and mask == XE_HW_ENGINE_RCS_MASK
 * - lookup_engine_info("rcs", NULL): return "rcs" entry from @engine_info
 */
static const struct engine_info *lookup_engine_info(const char *pattern, u64 *mask)
{
	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		u8 instance;
		u16 bit;

		if (!str_has_prefix(pattern, engine_info[i].cls))
			continue;

		pattern += strlen(engine_info[i].cls);
		if (!mask)
			return *pattern ? NULL : &engine_info[i];

		if (!strcmp(pattern, "*")) {
			*mask = engine_info[i].mask;
			return &engine_info[i];
		}

		if (kstrtou8(pattern, 10, &instance))
			return NULL;

		bit = __ffs64(engine_info[i].mask) + instance;
		if (bit >= fls64(engine_info[i].mask))
			return NULL;

		*mask = BIT_ULL(bit);
		return &engine_info[i];
	}

	return NULL;
}

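/*
 * Parse a single engine pattern (e.g. "rcs0" or "ccs*") from @s, stopping at
 * the first character found in @end_chars. The pattern is resolved via
 * lookup_engine_info(), with @mask and @pinfo filled in when non-NULL. For
 * example, parse_engine("bcs0,rcs0", ",\n", &mask, NULL) consumes "bcs0" and
 * returns 4, the number of characters parsed.
 */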
static int parse_engine(const char *s, const char *end_chars, u64 *mask,
			const struct engine_info **pinfo)
{
	char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
	const struct engine_info *info;
	size_t len;

	len = strcspn(s, end_chars);
	if (len >= sizeof(buf))
		return -EINVAL;

	memcpy(buf, s, len);
	buf[len] = '\0';

	info = lookup_engine_info(buf, mask);
	if (!info)
		return -ENOENT;

	if (pinfo)
		*pinfo = info;

	return len;
}

static ssize_t engines_allowed_store(struct config_item *item, const char *page,
				     size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	ssize_t patternlen, p;
	u64 mask, val = 0;

	for (p = 0; p < len; p += patternlen + 1) {
		patternlen = parse_engine(page + p, ",\n", &mask, NULL);
		if (patternlen < 0)
			return -EINVAL;

		val |= mask;
	}

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.engines_allowed = val;

	return len;
}

static ssize_t enable_psmi_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);

	return sprintf(page, "%d\n", dev->enable_psmi);
}

static ssize_t enable_psmi_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	bool val;
	int ret;

	ret = kstrtobool(page, &val);
	if (ret)
		return ret;

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.enable_psmi = val;

	return len;
}

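/*
 * Advance the output position @p by @len bytes. Only when @dereference is
 * true is the remaining space in @max_size checked and consumed and, if
 * @append is non-NULL, the bytes actually copied. With @dereference false
 * the call only counts, which allows a sizing dry-run over the same code
 * path as a real read.
 */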
static bool wa_bb_read_advance(bool dereference, char **p,
			       const char *append, size_t len,
			       size_t *max_size)
{
	if (dereference) {
		if (len >= *max_size)
			return false;
		*max_size -= len;
		if (append)
			memcpy(*p, append, len);
	}

	*p += len;

	return true;
}

static ssize_t wa_bb_show(struct xe_config_group_device *dev,
			  struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
			  char *data, size_t sz)
{
	char *p = data;

	guard(mutex)(&dev->lock);

	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		enum xe_engine_class ec = engine_info[i].engine_class;
		size_t len;

		if (!wa_bb[ec].len)
			continue;

		len = snprintf(p, sz, "%s:", engine_info[i].cls);
		if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
			return -ENOBUFS;

		for (size_t j = 0; j < wa_bb[ec].len; j++) {
			len = snprintf(p, sz, " %08x", wa_bb[ec].cs[j]);
			if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
				return -ENOBUFS;
		}

		if (!wa_bb_read_advance(data, &p, "\n", 1, &sz))
			return -ENOBUFS;
	}

	if (!wa_bb_read_advance(data, &p, "", 1, &sz))
		return -ENOBUFS;

	/* Reserve one more to match check for '\0' */
	if (!data)
		p++;

	return p - data;
}

static ssize_t ctx_restore_mid_bb_show(struct config_item *item, char *page)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_show(dev, dev->config.ctx_restore_mid_bb, page, SZ_4K);
}

static ssize_t ctx_restore_post_bb_show(struct config_item *item, char *page)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_show(dev, dev->config.ctx_restore_post_bb, page, SZ_4K);
}

static void wa_bb_append(struct wa_bb *wa_bb, u32 val)
{
	if (wa_bb->cs)
		wa_bb->cs[wa_bb->len] = val;

	wa_bb->len++;
}

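/*
 * Parse one whitespace-separated hex token from @line into *@pval. Return
 * the number of characters consumed (leading whitespace plus token), 0 if
 * the line ends before any token, or -EINVAL on malformed input. For
 * example, parse_hex(" 4F100 DEADBEEF", &val) consumes " 4F100" (6 chars)
 * and sets val to 0x4f100.
 */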
static ssize_t parse_hex(const char *line, u32 *pval)
{
	char numstr[12];
	const char *p;
	ssize_t numlen;

	p = line + strspn(line, " \t");
	if (!*p || *p == '\n')
		return 0;

	numlen = strcspn(p, " \t\n");
	if (!numlen || numlen >= sizeof(numstr) - 1)
		return -EINVAL;

	memcpy(numstr, p, numlen);
	numstr[numlen] = '\0';
	p += numlen;

	if (kstrtou32(numstr, 16, pval))
		return -EINVAL;

	return p - line;
}

/*
 * Parse lines with the format
 *
 *	<engine-class> cmd <u32> <u32...>
 *	<engine-class> reg <u32_addr> <u32_val>
 *
 * and optionally save them in @wa_bb if @wa_bb[i].cs is non-NULL.
 *
 * Return the number of dwords parsed.
 */
static ssize_t parse_wa_bb_lines(const char *lines,
				 struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX])
{
	ssize_t dwords = 0, ret;
	const char *p;

	for (p = lines; *p; p++) {
		const struct engine_info *info = NULL;
		u32 val, val2;

		/* Also allow empty lines */
		p += strspn(p, " \t\n");
		if (!*p)
			break;

		ret = parse_engine(p, " \t\n", NULL, &info);
		if (ret < 0)
			return ret;

		p += ret;
		p += strspn(p, " \t");

		if (str_has_prefix(p, "cmd")) {
			for (p += strlen("cmd"); *p;) {
				ret = parse_hex(p, &val);
				if (ret < 0)
					return -EINVAL;
				if (!ret)
					break;

				p += ret;
				dwords++;
				wa_bb_append(&wa_bb[info->engine_class], val);
			}
		} else if (str_has_prefix(p, "reg")) {
			p += strlen("reg");
			ret = parse_hex(p, &val);
			if (ret <= 0)
				return -EINVAL;

			p += ret;
			ret = parse_hex(p, &val2);
			if (ret <= 0)
				return -EINVAL;

			p += ret;
			dwords += 3;
			wa_bb_append(&wa_bb[info->engine_class],
				     MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1));
			wa_bb_append(&wa_bb[info->engine_class], val);
			wa_bb_append(&wa_bb[info->engine_class], val2);
		} else {
			return -EINVAL;
		}
	}

	return dwords;
}

static ssize_t wa_bb_store(struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
			   struct xe_config_group_device *dev,
			   const char *page, size_t len)
{
	/* tmp_wa_bb must match wa_bb's size */
	struct wa_bb tmp_wa_bb[XE_ENGINE_CLASS_MAX] = { };
	ssize_t count, class;
	u32 *tmp;

	/* 1. Count dwords - wa_bb[i].cs is NULL for all classes */
	count = parse_wa_bb_lines(page, tmp_wa_bb);
	if (count < 0)
		return count;

	guard(mutex)(&dev->lock);

	if (is_bound(dev))
		return -EBUSY;

	/*
	 * 2. Allocate a u32 array and set the pointers to the right positions
	 * according to the length of each class' wa_bb
	 */
	tmp = krealloc(wa_bb[0].cs, count * sizeof(u32), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	if (!count) {
		memset(wa_bb, 0, sizeof(tmp_wa_bb));
		return len;
	}

	for (class = 0, count = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		tmp_wa_bb[class].cs = tmp + count;
		count += tmp_wa_bb[class].len;
		tmp_wa_bb[class].len = 0;
	}

	/* 3. Parse wa_bb lines again, this time saving the values */
	count = parse_wa_bb_lines(page, tmp_wa_bb);
	if (count < 0)
		return count;

	memcpy(wa_bb, tmp_wa_bb, sizeof(tmp_wa_bb));

	return len;
}

static ssize_t ctx_restore_mid_bb_store(struct config_item *item,
					const char *data, size_t sz)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_store(dev->config.ctx_restore_mid_bb, dev, data, sz);
}

static ssize_t ctx_restore_post_bb_store(struct config_item *item,
					 const char *data, size_t sz)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_store(dev->config.ctx_restore_post_bb, dev, data, sz);
}

CONFIGFS_ATTR(, ctx_restore_mid_bb);
CONFIGFS_ATTR(, ctx_restore_post_bb);
CONFIGFS_ATTR(, enable_psmi);
CONFIGFS_ATTR(, engines_allowed);
CONFIGFS_ATTR(, gt_types_allowed);
CONFIGFS_ATTR(, survivability_mode);

static struct configfs_attribute *xe_config_device_attrs[] = {
	&attr_ctx_restore_mid_bb,
	&attr_ctx_restore_post_bb,
	&attr_enable_psmi,
	&attr_engines_allowed,
	&attr_gt_types_allowed,
	&attr_survivability_mode,
	NULL,
};

static void xe_config_device_release(struct config_item *item)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	mutex_destroy(&dev->lock);

	kfree(dev->config.ctx_restore_mid_bb[0].cs);
	kfree(dev->config.ctx_restore_post_bb[0].cs);
	kfree(dev);
}

static struct configfs_item_operations xe_config_device_ops = {
	.release = xe_config_device_release,
};

static bool xe_config_device_is_visible(struct config_item *item,
					struct configfs_attribute *attr, int n)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	if (attr == &attr_survivability_mode) {
		if (!dev->desc->is_dgfx || dev->desc->platform < XE_BATTLEMAGE)
			return false;
	}

	return true;
}

static struct configfs_group_operations xe_config_device_group_ops = {
	.is_visible = xe_config_device_is_visible,
};

static const struct config_item_type xe_config_device_type = {
	.ct_item_ops = &xe_config_device_ops,
	.ct_group_ops = &xe_config_device_group_ops,
	.ct_attrs = xe_config_device_attrs,
	.ct_owner = THIS_MODULE,
};

static ssize_t sriov_max_vfs_show(struct config_item *item, char *page)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);

	guard(mutex)(&dev->lock);

	if (dev->config.sriov.max_vfs == UINT_MAX)
		return sprintf(page, "%s\n", "unlimited");
	else
		return sprintf(page, "%u\n", dev->config.sriov.max_vfs);
}

static ssize_t sriov_max_vfs_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
	unsigned int max_vfs;
	int ret;

	guard(mutex)(&dev->lock);

	if (is_bound(dev))
		return -EBUSY;

	ret = kstrtouint(page, 0, &max_vfs);
	if (ret) {
		if (!sysfs_streq(page, "unlimited"))
			return ret;
		max_vfs = UINT_MAX;
	}

	dev->config.sriov.max_vfs = max_vfs;
	return len;
}

CONFIGFS_ATTR(sriov_, max_vfs);

static struct configfs_attribute *xe_config_sriov_attrs[] = {
	&sriov_attr_max_vfs,
	NULL,
};

static bool xe_config_sriov_is_visible(struct config_item *item,
				       struct configfs_attribute *attr, int n)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);

	if (attr == &sriov_attr_max_vfs && dev->mode != XE_SRIOV_MODE_PF)
		return false;

	return true;
}

static struct configfs_group_operations xe_config_sriov_group_ops = {
	.is_visible = xe_config_sriov_is_visible,
};

static const struct config_item_type xe_config_sriov_type = {
	.ct_owner = THIS_MODULE,
	.ct_group_ops = &xe_config_sriov_group_ops,
	.ct_attrs = xe_config_sriov_attrs,
};

static const struct xe_device_desc *xe_match_desc(struct pci_dev *pdev)
{
	struct device_driver *driver = driver_find("xe", &pci_bus_type);
	struct pci_driver *drv = to_pci_driver(driver);
	const struct pci_device_id *ids = drv ? drv->id_table : NULL;
	const struct pci_device_id *found = pci_match_id(ids, pdev);

	return found ? (const void *)found->driver_data : NULL;
}

static struct pci_dev *get_physfn_instead(struct pci_dev *virtfn)
{
	struct pci_dev *physfn = pci_physfn(virtfn);

	pci_dev_get(physfn);
	pci_dev_put(virtfn);
	return physfn;
}

static struct config_group *xe_config_make_device_group(struct config_group *group,
							const char *name)
{
	unsigned int domain, bus, slot, function;
	struct xe_config_group_device *dev;
	const struct xe_device_desc *match;
	enum xe_sriov_mode mode;
	struct pci_dev *pdev;
	char canonical[16];
	int vfnumber = 0;
	int ret;

	ret = sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function);
	if (ret != 4)
		return ERR_PTR(-EINVAL);

	ret = scnprintf(canonical, sizeof(canonical), "%04x:%02x:%02x.%d", domain, bus,
			PCI_SLOT(PCI_DEVFN(slot, function)),
			PCI_FUNC(PCI_DEVFN(slot, function)));
	if (ret != 12 || strcmp(name, canonical))
		return ERR_PTR(-EINVAL);

	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
	mode = pdev ? dev_is_pf(&pdev->dev) ?
		XE_SRIOV_MODE_PF : XE_SRIOV_MODE_NONE : XE_SRIOV_MODE_VF;

	if (!pdev && function)
		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, 0));
	if (!pdev && slot)
		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(0, 0));
	if (!pdev)
		return ERR_PTR(-ENODEV);

	if (PCI_DEVFN(slot, function) != pdev->devfn) {
		pdev = get_physfn_instead(pdev);
		vfnumber = PCI_DEVFN(slot, function) - pdev->devfn;
		if (!dev_is_pf(&pdev->dev) || vfnumber > pci_sriov_get_totalvfs(pdev)) {
			pci_dev_put(pdev);
			return ERR_PTR(-ENODEV);
		}
	}

	match = xe_match_desc(pdev);
	if (match && vfnumber && !match->has_sriov) {
		pci_info(pdev, "xe driver does not support VFs on this device\n");
		match = NULL;
	} else if (!match) {
		pci_info(pdev, "xe driver does not support configuration of this device\n");
	}

	pci_dev_put(pdev);

	if (!match)
		return ERR_PTR(-ENOENT);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->desc = match;
	dev->mode = match->has_sriov ? mode : XE_SRIOV_MODE_NONE;

	set_device_defaults(&dev->config);

	config_group_init_type_name(&dev->group, name, &xe_config_device_type);
	if (dev->mode != XE_SRIOV_MODE_NONE) {
		config_group_init_type_name(&dev->sriov, "sriov", &xe_config_sriov_type);
		configfs_add_default_group(&dev->sriov, &dev->group);
	}

	mutex_init(&dev->lock);

	return &dev->group;
}

static struct configfs_group_operations xe_config_group_ops = {
	.make_group = xe_config_make_device_group,
};

static const struct config_item_type xe_configfs_type = {
	.ct_group_ops = &xe_config_group_ops,
	.ct_owner = THIS_MODULE,
};

static struct configfs_subsystem xe_configfs = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "xe",
			.ci_type = &xe_configfs_type,
		},
	},
};

static struct xe_config_group_device *find_xe_config_group_device(struct pci_dev *pdev)
{
	struct config_item *item;

	mutex_lock(&xe_configfs.su_mutex);
	item = config_group_find_item(&xe_configfs.su_group, pci_name(pdev));
	mutex_unlock(&xe_configfs.su_mutex);

	if (!item)
		return NULL;

	return to_xe_config_group_device(item);
}

static void dump_custom_dev_config(struct pci_dev *pdev,
				   struct xe_config_group_device *dev)
{
#define PRI_CUSTOM_ATTR(fmt_, attr_) do { \
		if (dev->config.attr_ != device_defaults.attr_) \
			pci_info(pdev, "configfs: " __stringify(attr_) " = " fmt_ "\n", \
				 dev->config.attr_); \
	} while (0)

	PRI_CUSTOM_ATTR("%llx", gt_types_allowed);
	PRI_CUSTOM_ATTR("%llx", engines_allowed);
	PRI_CUSTOM_ATTR("%d", enable_psmi);
	PRI_CUSTOM_ATTR("%d", survivability_mode);

#undef PRI_CUSTOM_ATTR
}

/**
 * xe_configfs_check_device() - Test if device was configured by configfs
 * @pdev: the &pci_dev device to test
 *
 * Try to find the configfs group that belongs to the specified pci device
 * and print a diagnostic message if its configuration differs from the
 * default values.
 */
void xe_configfs_check_device(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);

	if (!dev)
		return;

	/* memcmp here is safe as both are zero-initialized */
	if (memcmp(&dev->config, &device_defaults, sizeof(dev->config))) {
		pci_info(pdev, "Found custom settings in configfs\n");
		dump_custom_dev_config(pdev, dev);
	}

	config_group_put(&dev->group);
}

/**
 * xe_configfs_get_survivability_mode - get configfs survivability mode attribute
 * @pdev: pci device
 *
 * Return: survivability_mode attribute in configfs
 */
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	bool mode;

	if (!dev)
		return device_defaults.survivability_mode;

	mode = dev->config.survivability_mode;
	config_group_put(&dev->group);

	return mode;
}

static u64 get_gt_types_allowed(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u64 mask;

	if (!dev)
		return device_defaults.gt_types_allowed;

	mask = dev->config.gt_types_allowed;
	config_group_put(&dev->group);

	return mask;
}

/**
 * xe_configfs_primary_gt_allowed - determine whether primary GTs are supported
 * @pdev: pci device
 *
 * Return: True if primary GTs are enabled, false if they have been disabled via
 * configfs.
 */
bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev)
{
	return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MAIN);
}

/**
 * xe_configfs_media_gt_allowed - determine whether media GTs are supported
 * @pdev: pci device
 *
 * Return: True if the media GTs are enabled, false if they have been disabled
 * via configfs.
 */
bool xe_configfs_media_gt_allowed(struct pci_dev *pdev)
{
	return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MEDIA);
}

/**
 * xe_configfs_get_engines_allowed - get engine allowed mask from configfs
 * @pdev: pci device
 *
 * Return: engine mask with allowed engines set in configfs
 */
u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u64 engines_allowed;

	if (!dev)
		return device_defaults.engines_allowed;

	engines_allowed = dev->config.engines_allowed;
	config_group_put(&dev->group);

	return engines_allowed;
}

/**
 * xe_configfs_get_psmi_enabled - get configfs enable_psmi setting
 * @pdev: pci device
 *
 * Return: enable_psmi setting in configfs
 */
bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	bool ret;

	if (!dev)
		return false;

	ret = dev->config.enable_psmi;
	config_group_put(&dev->group);

	return ret;
}

/**
 * xe_configfs_get_ctx_restore_mid_bb - get configfs ctx_restore_mid_bb setting
 * @pdev: pci device
 * @class: hw engine class
 * @cs: pointer to the bb to use - only valid during probe
 *
 * Return: Number of dwords used in the ctx_restore_mid_bb setting in configfs
 */
u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev,
				       enum xe_engine_class class,
				       const u32 **cs)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u32 len;

	if (!dev)
		return 0;

	if (cs)
		*cs = dev->config.ctx_restore_mid_bb[class].cs;

	len = dev->config.ctx_restore_mid_bb[class].len;
	config_group_put(&dev->group);

	return len;
}

/**
 * xe_configfs_get_ctx_restore_post_bb - get configfs ctx_restore_post_bb setting
 * @pdev: pci device
 * @class: hw engine class
 * @cs: pointer to the bb to use - only valid during probe
 *
 * Return: Number of dwords used in the ctx_restore_post_bb setting in configfs
 */
u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
					enum xe_engine_class class,
					const u32 **cs)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u32 len;

	if (!dev)
		return 0;

	/* Guard against a NULL @cs, matching the mid_bb variant above */
	if (cs)
		*cs = dev->config.ctx_restore_post_bb[class].cs;

	len = dev->config.ctx_restore_post_bb[class].len;
	config_group_put(&dev->group);

	return len;
}

#ifdef CONFIG_PCI_IOV
/**
 * xe_configfs_get_max_vfs() - Get number of VFs that could be managed
 * @pdev: the &pci_dev device
 *
 * Find the configfs group that belongs to the PCI device and return the
 * maximum number of Virtual Functions (VFs) that could be managed by this
 * device. If the configfs group is not present, the value of the max_vfs
 * module parameter is used.
 *
 * Return: maximum number of VFs that could be managed.
 */
unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	unsigned int max_vfs;

	if (!dev)
		return xe_modparam.max_vfs;

	scoped_guard(mutex, &dev->lock)
		max_vfs = dev->config.sriov.max_vfs;

	config_group_put(&dev->group);

	return max_vfs;
}
#endif

int __init xe_configfs_init(void)
{
	int ret;

	config_group_init(&xe_configfs.su_group);
	mutex_init(&xe_configfs.su_mutex);
	ret = configfs_register_subsystem(&xe_configfs);
	if (ret) {
		mutex_destroy(&xe_configfs.su_mutex);
		return ret;
	}

	return 0;
}

void xe_configfs_exit(void)
{
	configfs_unregister_subsystem(&xe_configfs);
	mutex_destroy(&xe_configfs.su_mutex);
}