// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/find.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>

#include "instructions/xe_mi_commands.h"
#include "xe_configfs.h"
#include "xe_gt_types.h"
#include "xe_hw_engine_types.h"
#include "xe_module.h"
#include "xe_pci_types.h"
#include "xe_sriov_types.h"

/**
 * DOC: Xe Configfs
 *
 * Overview
 * ========
 *
 * Configfs is a filesystem-based manager of kernel objects. Xe KMD registers a
 * configfs subsystem called ``xe`` that creates a directory in the mounted
 * configfs directory. The user can create devices under this directory and
 * configure them as necessary. See Documentation/filesystems/configfs.rst for
 * more information about how configfs works.
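 *
 * If configfs is not already mounted, it can be mounted with::
 *
 *	# mount -t configfs none /sys/kernel/config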
 *
 * Create devices
 * ==============
 *
 * To create a device, the ``xe`` module must already be loaded, but some
 * attributes can only be set before binding the device. That can be
 * accomplished by blocking the driver autoprobe::
 *
 *	# echo 0 > /sys/bus/pci/drivers_autoprobe
 *	# modprobe xe
 *
 * A device is created by making a directory named after its PCI address
 * inside ``xe``::
 *
 *	# mkdir /sys/kernel/config/xe/0000:03:00.0/
 *
 * Every device created is populated by the driver with entries that can be
 * used to configure it::
 *
 *	/sys/kernel/config/xe/
 *	├── 0000:00:02.0
 *	│   └── ...
 *	├── 0000:00:02.1
 *	│   └── ...
 *	:
 *	└── 0000:03:00.0
 *	    ├── survivability_mode
 *	    ├── gt_types_allowed
 *	    ├── engines_allowed
 *	    └── enable_psmi
 *
 * After configuring the attributes as per the next section, the device can be
 * probed with::
 *
 *	# echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind
 *	# # or
 *	# echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
 *
 * Configure Attributes
 * ====================
 *
 * Survivability mode
 * ------------------
 *
 * Enable survivability mode on supported cards. This setting only takes
 * effect when probing the device. Example to enable it::
 *
 *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
 *
 * This attribute can only be set before binding to the device.
 *
 * Allowed GT types
 * ----------------
 *
 * Allow only specific types of GTs to be detected and initialized by the
 * driver. Any combination of GT types can be enabled/disabled, although
 * some settings will cause the device to fail to probe.
 *
 * Writes accept both comma- and newline-separated input formats. Reads
 * always return one GT type per line. "primary" and "media" are the
 * GT type names supported by this interface.
 *
 * This attribute can only be set before binding to the device.
 *
 * Examples:
 *
 * Allow both primary and media GTs to be initialized and used.  This matches
 * the driver's default behavior::
 *
 *	# echo 'primary,media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *
 * Allow only the primary GT of each tile to be initialized and used,
 * effectively disabling the media GT if it exists on the platform::
 *
 *	# echo 'primary' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *
 * Allow only the media GT of each tile to be initialized and used,
 * effectively disabling the primary GT.  **This configuration will cause
 * device probe failure on all current platforms, but may be allowed on
 * igpu platforms in the future**::
 *
 *	# echo 'media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *
 * Disable all GTs.  Only other GPU IP (such as display) is potentially usable.
 * **This configuration will cause device probe failure on all current
 * platforms, but may be allowed on igpu platforms in the future**::
 *
 *	# echo '' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *
 * Allowed engines
 * ---------------
 *
 * Allow only a set of engines to be available, disabling the other engines
 * even if they are present in hardware. This is applied after HW fuses are
 * considered on each tile. Examples:
 *
 * Allow only one render and one copy engine, nothing else::
 *
 *	# echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *
 * Allow only the compute engines and the first copy engine::
 *
 *	# echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *
 * Note that the engine names are the per-GT hardware names. On multi-tile
 * platforms, writing ``rcs0,bcs0`` to this file would allow the first render
 * and copy engines on each tile.
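 *
 * Reading the attribute back shows one entry per allowed engine, with a
 * fully-allowed class collapsed into ``<class>*``. For instance, with the
 * ``ccs*,bcs0`` configuration above::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *	bcs0
 *	ccs*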
 *
 * The requested configuration may not be supported by the platform, in which
 * case the driver may fail to probe: for example, if at least one copy engine
 * is expected to be available for migrations but all of them are disabled.
 * This is intended for debugging purposes only.
 *
 * This attribute can only be set before binding to the device.
 *
 * PSMI
 * ----
 *
 * Enable extra debugging capabilities to trace engine execution. Only useful
 * during early platform enabling, and requires additional hardware to be
 * connected. Once enabled, additional workarounds (WAs) are applied and
 * runtime configuration is done via debugfs. Example to enable it::
 *
 *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/enable_psmi
 *
 * This attribute can only be set before binding to the device.
 *
 * Context restore BB
 * ------------------
 *
 * Allow a batch buffer to be executed during context switches: when the
 * GPU is restoring a context, it executes these additional commands. This is
 * useful for testing additional workarounds and validating certain HW
 * behaviors; it's not intended for normal execution and will taint the
 * kernel with TAINT_TEST when used.
 *
 * The syntax allows passing raw instructions to be executed by the engine
 * in a batch buffer, or setting specific registers.
 *
 * #. Generic instruction::
 *
 *	<engine-class> cmd <instr> [[dword0] [dword1] [...]]
 *
 * #. Simple register setting::
 *
 *	<engine-class> reg <address> <value>
 *
 * Commands are saved per engine class: all instances of that class will execute
 * those commands during context switch. The instruction, dword arguments,
 * addresses and values are in hex format, as in the examples below.
 *
 * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f100 after the
 *    normal context restore::
 *
 *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
 *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
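 *
 *    (Here ``11000001`` is the dword for MI_LOAD_REGISTER_IMM with a single
 *    register/value pair; it is the same opcode the driver emits internally
 *    for the ``reg`` action shown below.)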
 *
 * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f100 at the
 *    beginning of the context restore::
 *
 *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
 *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_mid_bb
 *
 * #. Load certain values in a couple of registers (it can be used as a simpler
 *    alternative to the ``cmd`` action)::
 *
 *	# cat > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb <<EOF
 *	rcs reg 4F100 DEADBEEF
 *	rcs reg 4F104 FFFFFFFF
 *	EOF
 *
 *    .. note::
 *
 *       When using multiple lines, make sure to use a command that is
 *       implemented with a single write syscall, like a heredoc.
 *
 * Currently this is implemented only for post and mid context restore, and
 * these attributes can only be set before binding to the device.
 *
 * Max SR-IOV Virtual Functions
 * ----------------------------
 *
 * This config allows limiting the number of Virtual Functions (VFs) that can
 * be managed by the Physical Function (PF) driver, where the value 0 disables
 * the PF mode (no VFs).
 *
 * The default max_vfs config value is taken from the max_vfs modparam.
 *
 * How to enable the PF with support for an unlimited (up to the HW limit)
 * number of VFs::
 *
 *	# echo unlimited > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
 *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
 *
 * How to enable the PF with support for up to 3 VFs::
 *
 *	# echo 3 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
 *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
 *
 * How to disable the PF mode and always run as native::
 *
 *	# echo 0 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
 *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
 *
 * This setting only takes effect when probing the device.
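 *
 * Note that max_vfs only caps how many VFs the PF driver may manage. The VFs
 * themselves are still enabled later through the standard PCI sysfs interface
 * (assuming the usual SR-IOV enable flow), e.g.::
 *
 *	# echo 2 > /sys/bus/pci/devices/0000:00:02.0/sriov_numvfs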
 *
 * Remove devices
 * ==============
 *
 * The created device directories can be removed using ``rmdir``::
 *
 *	# rmdir /sys/kernel/config/xe/0000:03:00.0/
 */

/* Similar to struct xe_bb, but not tied to HW (yet) */
struct wa_bb {
	u32 *cs;
	u32 len; /* in dwords */
};

struct xe_config_group_device {
	struct config_group group;
	struct config_group sriov;

	struct xe_config_device {
		u64 gt_types_allowed;
		u64 engines_allowed;
		struct wa_bb ctx_restore_post_bb[XE_ENGINE_CLASS_MAX];
		struct wa_bb ctx_restore_mid_bb[XE_ENGINE_CLASS_MAX];
		bool survivability_mode;
		bool enable_psmi;
		struct {
			unsigned int max_vfs;
		} sriov;
	} config;

	/* protects attributes */
	struct mutex lock;
	/* matching descriptor */
	const struct xe_device_desc *desc;
	/* tentative SR-IOV mode */
	enum xe_sriov_mode mode;
};

static const struct xe_config_device device_defaults = {
	.gt_types_allowed = U64_MAX,
	.engines_allowed = U64_MAX,
	.survivability_mode = false,
	.enable_psmi = false,
	.sriov = {
		.max_vfs = UINT_MAX,
	},
};

static void set_device_defaults(struct xe_config_device *config)
{
	*config = device_defaults;
#ifdef CONFIG_PCI_IOV
	config->sriov.max_vfs = xe_modparam.max_vfs;
#endif
}

struct engine_info {
	const char *cls;
	u64 mask;
	enum xe_engine_class engine_class;
};

/* Some helpful macros to aid on the sizing of buffer allocation when parsing */
#define MAX_ENGINE_CLASS_CHARS 5	/* long enough for "gsccs" */
#define MAX_ENGINE_INSTANCE_CHARS 2	/* up to 2-digit engine instances */

static const struct engine_info engine_info[] = {
	{ .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK, .engine_class = XE_ENGINE_CLASS_RENDER },
	{ .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK, .engine_class = XE_ENGINE_CLASS_COPY },
	{ .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_DECODE },
	{ .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_ENHANCE },
	{ .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK, .engine_class = XE_ENGINE_CLASS_COMPUTE },
	{ .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK, .engine_class = XE_ENGINE_CLASS_OTHER },
};

static const struct {
	const char *name;
	enum xe_gt_type type;
} gt_types[] = {
	{ .name = "primary", .type = XE_GT_TYPE_MAIN },
	{ .name = "media", .type = XE_GT_TYPE_MEDIA },
};

static struct xe_config_group_device *to_xe_config_group_device(struct config_item *item)
{
	return container_of(to_config_group(item), struct xe_config_group_device, group);
}

static struct xe_config_device *to_xe_config_device(struct config_item *item)
{
	return &to_xe_config_group_device(item)->config;
}

static bool is_bound(struct xe_config_group_device *dev)
{
	unsigned int domain, bus, slot, function;
	struct pci_dev *pdev;
	const char *name;
	bool ret;

	lockdep_assert_held(&dev->lock);

	name = dev->group.cg_item.ci_name;
	if (sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function) != 4)
		return false;

	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
	if (!pdev)
		return false;

	ret = pci_get_drvdata(pdev);
	if (ret)
		pci_dbg(pdev, "Already bound to driver\n");

	/* Drop the reference only after we are done using pdev */
	pci_dev_put(pdev);

	return ret;
}

static ssize_t survivability_mode_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);

	return sprintf(page, "%d\n", dev->survivability_mode);
}

static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	bool survivability_mode;
	int ret;

	ret = kstrtobool(page, &survivability_mode);
	if (ret)
		return ret;

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.survivability_mode = survivability_mode;

	return len;
}

static ssize_t gt_types_allowed_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);
	char *p = page;

	for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++)
		if (dev->gt_types_allowed & BIT_ULL(gt_types[i].type))
			p += sprintf(p, "%s\n", gt_types[i].name);

	return p - page;
}

static ssize_t gt_types_allowed_store(struct config_item *item, const char *page,
				      size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	char *buf __free(kfree) = kstrdup(page, GFP_KERNEL);
	char *p = buf;
	u64 typemask = 0;

	if (!buf)
		return -ENOMEM;

	while (p) {
		char *typename = strsep(&p, ",\n");
		bool matched = false;

		if (typename[0] == '\0')
			continue;

		for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++) {
			if (strcmp(typename, gt_types[i].name) == 0) {
				/* BIT_ULL to match the u64 typemask */
				typemask |= BIT_ULL(gt_types[i].type);
				matched = true;
				break;
			}
		}

		if (!matched)
			return -EINVAL;
	}

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.gt_types_allowed = typemask;

	return len;
}

static ssize_t engines_allowed_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);
	char *p = page;

	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		u64 mask = engine_info[i].mask;

		if ((dev->engines_allowed & mask) == mask) {
			p += sprintf(p, "%s*\n", engine_info[i].cls);
		} else if (mask & dev->engines_allowed) {
			u16 bit0 = __ffs64(mask), bit;

			mask &= dev->engines_allowed;

			for_each_set_bit(bit, (const unsigned long *)&mask, 64)
				p += sprintf(p, "%s%u\n", engine_info[i].cls,
					     bit - bit0);
		}
	}

	return p - page;
}

/*
 * Lookup engine_info. If @mask is not NULL, reduce the mask according to the
 * instance in @pattern.
 *
 * Examples of inputs:
 * - lookup_engine_info("rcs0", &mask): return "rcs" entry from @engine_info and
 *   mask == BIT_ULL(XE_HW_ENGINE_RCS0)
 * - lookup_engine_info("rcs*", &mask): return "rcs" entry from @engine_info and
 *   mask == XE_HW_ENGINE_RCS_MASK
 * - lookup_engine_info("rcs", NULL): return "rcs" entry from @engine_info
 */
static const struct engine_info *lookup_engine_info(const char *pattern, u64 *mask)
{
	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		u8 instance;
		u16 bit;

		if (!str_has_prefix(pattern, engine_info[i].cls))
			continue;

		pattern += strlen(engine_info[i].cls);
		if (!mask)
			return *pattern ? NULL : &engine_info[i];

		if (!strcmp(pattern, "*")) {
			*mask = engine_info[i].mask;
			return &engine_info[i];
		}

		if (kstrtou8(pattern, 10, &instance))
			return NULL;

		bit = __ffs64(engine_info[i].mask) + instance;
		if (bit >= fls64(engine_info[i].mask))
			return NULL;

		*mask = BIT_ULL(bit);
		return &engine_info[i];
	}

	return NULL;
}

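/*
 * Parse a single engine pattern from @s, stopping at the first character in
 * @end_chars. For example, parse_engine("bcs0,rcs0", ",\n", &mask, NULL)
 * consumes "bcs0", returns 4 and sets @mask to the bit of the first copy
 * engine.
 */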
static int parse_engine(const char *s, const char *end_chars, u64 *mask,
			const struct engine_info **pinfo)
{
	char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
	const struct engine_info *info;
	size_t len;

	len = strcspn(s, end_chars);
	if (len >= sizeof(buf))
		return -EINVAL;

	memcpy(buf, s, len);
	buf[len] = '\0';

	info = lookup_engine_info(buf, mask);
	if (!info)
		return -ENOENT;

	if (pinfo)
		*pinfo = info;

	return len;
}

static ssize_t engines_allowed_store(struct config_item *item, const char *page,
				     size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	ssize_t patternlen, p;
	u64 mask, val = 0;

	for (p = 0; p < len; p += patternlen + 1) {
		patternlen = parse_engine(page + p, ",\n", &mask, NULL);
		if (patternlen < 0)
			return -EINVAL;

		val |= mask;
	}

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.engines_allowed = val;

	return len;
}

static ssize_t enable_psmi_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);

	return sprintf(page, "%d\n", dev->enable_psmi);
}

static ssize_t enable_psmi_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	bool val;
	int ret;

	ret = kstrtobool(page, &val);
	if (ret)
		return ret;

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.enable_psmi = val;

	return len;
}

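/*
 * Advance the output cursor @p by @len. When @dereference is true (i.e. a
 * real buffer is being filled), also bounds-check against @max_size and copy
 * @append, if given. With a NULL buffer the callers only measure the size
 * needed.
 */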
static bool wa_bb_read_advance(bool dereference, char **p,
			       const char *append, size_t len,
			       size_t *max_size)
{
	if (dereference) {
		if (len >= *max_size)
			return false;
		*max_size -= len;
		if (append)
			memcpy(*p, append, len);
	}

	*p += len;

	return true;
}

static ssize_t wa_bb_show(struct xe_config_group_device *dev,
			  struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
			  char *data, size_t sz)
{
	char *p = data;

	guard(mutex)(&dev->lock);

	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		enum xe_engine_class ec = engine_info[i].engine_class;
		size_t len;

		if (!wa_bb[ec].len)
			continue;

		len = snprintf(p, sz, "%s:", engine_info[i].cls);
		if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
			return -ENOBUFS;

		for (size_t j = 0; j < wa_bb[ec].len; j++) {
			len = snprintf(p, sz, " %08x", wa_bb[ec].cs[j]);
			if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
				return -ENOBUFS;
		}

		if (!wa_bb_read_advance(data, &p, "\n", 1, &sz))
			return -ENOBUFS;
	}

	if (!wa_bb_read_advance(data, &p, "", 1, &sz))
		return -ENOBUFS;

	/* When only sizing (data == NULL), reserve one more byte for the '\0' */
	if (!data)
		p++;

	return p - data;
}

static ssize_t ctx_restore_mid_bb_show(struct config_item *item, char *page)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_show(dev, dev->config.ctx_restore_mid_bb, page, SZ_4K);
}

static ssize_t ctx_restore_post_bb_show(struct config_item *item, char *page)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_show(dev, dev->config.ctx_restore_post_bb, page, SZ_4K);
}

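/*
 * Append one dword to @wa_bb. When @wa_bb->cs is NULL, only count the dwords:
 * this lets parse_wa_bb_lines() run a first sizing pass before any buffer is
 * allocated.
 */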
static void wa_bb_append(struct wa_bb *wa_bb, u32 val)
{
	if (wa_bb->cs)
		wa_bb->cs[wa_bb->len] = val;

	wa_bb->len++;
}

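/*
 * Parse one whitespace-delimited hex number from @line into @pval. Return the
 * number of characters consumed, 0 at the end of the line, or -EINVAL on
 * malformed input. E.g. parse_hex(" deadbeef x", &v) returns 9 and sets
 * v == 0xdeadbeef.
 */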
static ssize_t parse_hex(const char *line, u32 *pval)
{
	char numstr[12];
	const char *p;
	ssize_t numlen;

	p = line + strspn(line, " \t");
	if (!*p || *p == '\n')
		return 0;

	numlen = strcspn(p, " \t\n");
	if (!numlen || numlen >= sizeof(numstr) - 1)
		return -EINVAL;

	memcpy(numstr, p, numlen);
	numstr[numlen] = '\0';
	p += numlen;

	if (kstrtou32(numstr, 16, pval))
		return -EINVAL;

	return p - line;
}

/*
 * Parse lines with the format
 *
 *	<engine-class> cmd <u32> <u32...>
 *	<engine-class> reg <u32_addr> <u32_val>
 *
 * and optionally save them in @wa_bb if @wa_bb[i].cs is non-NULL.
 *
 * Return the number of dwords parsed.
 */
static ssize_t parse_wa_bb_lines(const char *lines,
				 struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX])
{
	ssize_t dwords = 0, ret;
	const char *p;

	for (p = lines; *p; p++) {
		const struct engine_info *info = NULL;
		u32 val, val2;

		/* Also allow empty lines */
		p += strspn(p, " \t\n");
		if (!*p)
			break;

		ret = parse_engine(p, " \t\n", NULL, &info);
		if (ret < 0)
			return ret;

		p += ret;
		p += strspn(p, " \t");

		if (str_has_prefix(p, "cmd")) {
			for (p += strlen("cmd"); *p;) {
				ret = parse_hex(p, &val);
				if (ret < 0)
					return -EINVAL;
				if (!ret)
					break;

				p += ret;
				dwords++;
				wa_bb_append(&wa_bb[info->engine_class], val);
			}
		} else if (str_has_prefix(p, "reg")) {
			p += strlen("reg");
			ret = parse_hex(p, &val);
			if (ret <= 0)
				return -EINVAL;

			p += ret;
			ret = parse_hex(p, &val2);
			if (ret <= 0)
				return -EINVAL;

			p += ret;
			dwords += 3;
			wa_bb_append(&wa_bb[info->engine_class],
				     MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1));
			wa_bb_append(&wa_bb[info->engine_class], val);
			wa_bb_append(&wa_bb[info->engine_class], val2);
		} else {
			return -EINVAL;
		}
	}

	return dwords;
}

static ssize_t wa_bb_store(struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
			   struct xe_config_group_device *dev,
			   const char *page, size_t len)
{
	/* tmp_wa_bb must match wa_bb's size */
	struct wa_bb tmp_wa_bb[XE_ENGINE_CLASS_MAX] = { };
	ssize_t count, class;
	u32 *tmp;

	/* 1. Count dwords - wa_bb[i].cs is NULL for all classes */
	count = parse_wa_bb_lines(page, tmp_wa_bb);
	if (count < 0)
		return count;

	guard(mutex)(&dev->lock);

	if (is_bound(dev))
		return -EBUSY;

	/*
	 * 2. Allocate a u32 array and set the pointers to the right positions
	 * according to the length of each class' wa_bb. A zero count relies
	 * on krealloc() freeing the previous buffer.
	 */
	tmp = krealloc(wa_bb[0].cs, count * sizeof(u32), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	if (!count) {
		memset(wa_bb, 0, sizeof(tmp_wa_bb));
		return len;
	}

	for (class = 0, count = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		tmp_wa_bb[class].cs = tmp + count;
		count += tmp_wa_bb[class].len;
		tmp_wa_bb[class].len = 0;
	}

	/* 3. Parse wa_bb lines again, this time saving the values */
	count = parse_wa_bb_lines(page, tmp_wa_bb);
	if (count < 0)
		return count;

	memcpy(wa_bb, tmp_wa_bb, sizeof(tmp_wa_bb));

	return len;
}

static ssize_t ctx_restore_mid_bb_store(struct config_item *item,
					const char *data, size_t sz)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_store(dev->config.ctx_restore_mid_bb, dev, data, sz);
}

static ssize_t ctx_restore_post_bb_store(struct config_item *item,
					 const char *data, size_t sz)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_store(dev->config.ctx_restore_post_bb, dev, data, sz);
}

CONFIGFS_ATTR(, ctx_restore_mid_bb);
CONFIGFS_ATTR(, ctx_restore_post_bb);
CONFIGFS_ATTR(, enable_psmi);
CONFIGFS_ATTR(, engines_allowed);
CONFIGFS_ATTR(, gt_types_allowed);
CONFIGFS_ATTR(, survivability_mode);

static struct configfs_attribute *xe_config_device_attrs[] = {
	&attr_ctx_restore_mid_bb,
	&attr_ctx_restore_post_bb,
	&attr_enable_psmi,
	&attr_engines_allowed,
	&attr_gt_types_allowed,
	&attr_survivability_mode,
	NULL,
};

static void xe_config_device_release(struct config_item *item)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	mutex_destroy(&dev->lock);

	/* Each wa_bb array shares a single allocation, anchored at [0].cs */
	kfree(dev->config.ctx_restore_mid_bb[0].cs);
	kfree(dev->config.ctx_restore_post_bb[0].cs);
	kfree(dev);
}

static struct configfs_item_operations xe_config_device_ops = {
	.release	= xe_config_device_release,
};

static bool xe_config_device_is_visible(struct config_item *item,
					struct configfs_attribute *attr, int n)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	if (attr == &attr_survivability_mode) {
		if (!dev->desc->is_dgfx || dev->desc->platform < XE_BATTLEMAGE)
			return false;
	}

	return true;
}

static struct configfs_group_operations xe_config_device_group_ops = {
	.is_visible	= xe_config_device_is_visible,
};

static const struct config_item_type xe_config_device_type = {
	.ct_item_ops	= &xe_config_device_ops,
	.ct_group_ops	= &xe_config_device_group_ops,
	.ct_attrs	= xe_config_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static ssize_t sriov_max_vfs_show(struct config_item *item, char *page)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);

	guard(mutex)(&dev->lock);

	if (dev->config.sriov.max_vfs == UINT_MAX)
		return sprintf(page, "%s\n", "unlimited");
	else
		return sprintf(page, "%u\n", dev->config.sriov.max_vfs);
}

static ssize_t sriov_max_vfs_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
	unsigned int max_vfs;
	int ret;

	guard(mutex)(&dev->lock);

	if (is_bound(dev))
		return -EBUSY;

	ret = kstrtouint(page, 0, &max_vfs);
	if (ret) {
		if (!sysfs_streq(page, "unlimited"))
			return ret;
		max_vfs = UINT_MAX;
	}

	dev->config.sriov.max_vfs = max_vfs;
	return len;
}

CONFIGFS_ATTR(sriov_, max_vfs);

static struct configfs_attribute *xe_config_sriov_attrs[] = {
	&sriov_attr_max_vfs,
	NULL,
};

static bool xe_config_sriov_is_visible(struct config_item *item,
				       struct configfs_attribute *attr, int n)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);

	if (attr == &sriov_attr_max_vfs && dev->mode != XE_SRIOV_MODE_PF)
		return false;

	return true;
}

static struct configfs_group_operations xe_config_sriov_group_ops = {
	.is_visible	= xe_config_sriov_is_visible,
};

static const struct config_item_type xe_config_sriov_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &xe_config_sriov_group_ops,
	.ct_attrs	= xe_config_sriov_attrs,
};

static const struct xe_device_desc *xe_match_desc(struct pci_dev *pdev)
{
	struct device_driver *driver = driver_find("xe", &pci_bus_type);
	struct pci_driver *drv = to_pci_driver(driver);
	const struct pci_device_id *ids = drv ? drv->id_table : NULL;
	const struct pci_device_id *found = pci_match_id(ids, pdev);

	return found ? (const void *)found->driver_data : NULL;
}

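/*
 * Trade the reference held on @virtfn for a reference on its physical
 * function, so the caller can keep using the returned PF and release it with
 * the usual pci_dev_put() when done.
 */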
static struct pci_dev *get_physfn_instead(struct pci_dev *virtfn)
{
	struct pci_dev *physfn = pci_physfn(virtfn);

	pci_dev_get(physfn);
	pci_dev_put(virtfn);
	return physfn;
}

static struct config_group *xe_config_make_device_group(struct config_group *group,
							const char *name)
{
	unsigned int domain, bus, slot, function;
	struct xe_config_group_device *dev;
	const struct xe_device_desc *match;
	enum xe_sriov_mode mode;
	struct pci_dev *pdev;
	char canonical[16];
	int vfnumber = 0;
	int ret;

	ret = sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function);
	if (ret != 4)
		return ERR_PTR(-EINVAL);

	ret = scnprintf(canonical, sizeof(canonical), "%04x:%02x:%02x.%d", domain, bus,
			PCI_SLOT(PCI_DEVFN(slot, function)),
			PCI_FUNC(PCI_DEVFN(slot, function)));
	if (ret != 12 || strcmp(name, canonical))
		return ERR_PTR(-EINVAL);

	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
	if (pdev)
		mode = dev_is_pf(&pdev->dev) ? XE_SRIOV_MODE_PF : XE_SRIOV_MODE_NONE;
	else
		mode = XE_SRIOV_MODE_VF;

	if (!pdev && function)
		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, 0));
	if (!pdev && slot)
		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(0, 0));
	if (!pdev)
		return ERR_PTR(-ENODEV);

	if (PCI_DEVFN(slot, function) != pdev->devfn) {
		pdev = get_physfn_instead(pdev);
		vfnumber = PCI_DEVFN(slot, function) - pdev->devfn;
		if (!dev_is_pf(&pdev->dev) || vfnumber > pci_sriov_get_totalvfs(pdev)) {
			pci_dev_put(pdev);
			return ERR_PTR(-ENODEV);
		}
	}

	match = xe_match_desc(pdev);
	if (match && vfnumber && !match->has_sriov) {
		pci_info(pdev, "xe driver does not support VFs on this device\n");
		match = NULL;
	} else if (!match) {
		pci_info(pdev, "xe driver does not support configuration of this device\n");
	}

	pci_dev_put(pdev);

	if (!match)
		return ERR_PTR(-ENOENT);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->desc = match;
	dev->mode = match->has_sriov ? mode : XE_SRIOV_MODE_NONE;

	set_device_defaults(&dev->config);

	config_group_init_type_name(&dev->group, name, &xe_config_device_type);
	if (dev->mode != XE_SRIOV_MODE_NONE) {
		config_group_init_type_name(&dev->sriov, "sriov", &xe_config_sriov_type);
		configfs_add_default_group(&dev->sriov, &dev->group);
	}

	mutex_init(&dev->lock);

	return &dev->group;
}

static struct configfs_group_operations xe_config_group_ops = {
	.make_group	= xe_config_make_device_group,
};

static const struct config_item_type xe_configfs_type = {
	.ct_group_ops	= &xe_config_group_ops,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem xe_configfs = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "xe",
			.ci_type = &xe_configfs_type,
		},
	},
};

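/*
 * Find the device group for @pdev, if any. config_group_find_item() takes a
 * reference on the returned item, so on a non-NULL return the caller must
 * drop it with config_group_put() when done.
 */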
static struct xe_config_group_device *find_xe_config_group_device(struct pci_dev *pdev)
{
	struct config_item *item;

	mutex_lock(&xe_configfs.su_mutex);
	item = config_group_find_item(&xe_configfs.su_group, pci_name(pdev));
	mutex_unlock(&xe_configfs.su_mutex);

	if (!item)
		return NULL;

	return to_xe_config_group_device(item);
}

static void dump_custom_dev_config(struct pci_dev *pdev,
				   struct xe_config_group_device *dev)
{
#define PRI_CUSTOM_ATTR(fmt_, attr_) do { \
		if (dev->config.attr_ != device_defaults.attr_) \
			pci_info(pdev, "configfs: " __stringify(attr_) " = " fmt_ "\n", \
				 dev->config.attr_); \
	} while (0)

	PRI_CUSTOM_ATTR("%llx", gt_types_allowed);
	PRI_CUSTOM_ATTR("%llx", engines_allowed);
	PRI_CUSTOM_ATTR("%d", enable_psmi);
	PRI_CUSTOM_ATTR("%d", survivability_mode);

#undef PRI_CUSTOM_ATTR
}

/**
 * xe_configfs_check_device() - Test if device was configured by configfs
 * @pdev: the &pci_dev device to test
 *
 * Try to find the configfs group that belongs to the specified pci device
 * and print a diagnostic message if the configuration differs from the
 * defaults.
 */
void xe_configfs_check_device(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);

	if (!dev)
		return;

	/* memcmp here is safe as both are zero-initialized */
	if (memcmp(&dev->config, &device_defaults, sizeof(dev->config))) {
		pci_info(pdev, "Found custom settings in configfs\n");
		dump_custom_dev_config(pdev, dev);
	}

	config_group_put(&dev->group);
}

/**
 * xe_configfs_get_survivability_mode - get configfs survivability mode attribute
 * @pdev: pci device
 *
 * Return: survivability_mode attribute in configfs
 */
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	bool mode;

	if (!dev)
		return device_defaults.survivability_mode;

	mode = dev->config.survivability_mode;
	config_group_put(&dev->group);

	return mode;
}

static u64 get_gt_types_allowed(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u64 mask;

	if (!dev)
		return device_defaults.gt_types_allowed;

	mask = dev->config.gt_types_allowed;
	config_group_put(&dev->group);

	return mask;
}

/**
 * xe_configfs_primary_gt_allowed - determine whether primary GTs are supported
 * @pdev: pci device
 *
 * Return: True if primary GTs are enabled, false if they have been disabled via
 *     configfs.
 */
bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev)
{
	return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MAIN);
}

/**
 * xe_configfs_media_gt_allowed - determine whether media GTs are supported
 * @pdev: pci device
 *
 * Return: True if the media GTs are enabled, false if they have been disabled
 *     via configfs.
 */
bool xe_configfs_media_gt_allowed(struct pci_dev *pdev)
{
	return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MEDIA);
}

/**
 * xe_configfs_get_engines_allowed - get engine allowed mask from configfs
 * @pdev: pci device
 *
 * Return: engine mask with allowed engines set in configfs
 */
u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u64 engines_allowed;

	if (!dev)
		return device_defaults.engines_allowed;

	engines_allowed = dev->config.engines_allowed;
	config_group_put(&dev->group);

	return engines_allowed;
}

/**
 * xe_configfs_get_psmi_enabled - get configfs enable_psmi setting
 * @pdev: pci device
 *
 * Return: enable_psmi setting in configfs
 */
bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	bool ret;

	if (!dev)
		return false;

	ret = dev->config.enable_psmi;
	config_group_put(&dev->group);

	return ret;
}

/**
 * xe_configfs_get_ctx_restore_mid_bb - get configfs ctx_restore_mid_bb setting
 * @pdev: pci device
 * @class: hw engine class
 * @cs: pointer to the bb to use - only valid during probe
 *
 * Return: Number of dwords used in the ctx_restore_mid_bb setting in configfs
 */
u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev,
				       enum xe_engine_class class,
				       const u32 **cs)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u32 len;

	if (!dev)
		return 0;

	if (cs)
		*cs = dev->config.ctx_restore_mid_bb[class].cs;

	len = dev->config.ctx_restore_mid_bb[class].len;
	config_group_put(&dev->group);

	return len;
}

/**
 * xe_configfs_get_ctx_restore_post_bb - get configfs ctx_restore_post_bb setting
 * @pdev: pci device
 * @class: hw engine class
 * @cs: pointer to the bb to use - only valid during probe
 *
 * Return: Number of dwords used in the ctx_restore_post_bb setting in configfs
 */
u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
					enum xe_engine_class class,
					const u32 **cs)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u32 len;

	if (!dev)
		return 0;

	if (cs)
		*cs = dev->config.ctx_restore_post_bb[class].cs;

	len = dev->config.ctx_restore_post_bb[class].len;
	config_group_put(&dev->group);

	return len;
}

#ifdef CONFIG_PCI_IOV
/**
 * xe_configfs_get_max_vfs() - Get number of VFs that could be managed
 * @pdev: the &pci_dev device
 *
 * Find the configfs group that belongs to the PCI device and return the
 * maximum number of Virtual Functions (VFs) that could be managed by this
 * device. If the configfs group is not present, use the value of the max_vfs
 * module parameter.
 *
 * Return: maximum number of VFs that could be managed.
 */
unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	unsigned int max_vfs;

	if (!dev)
		return xe_modparam.max_vfs;

	scoped_guard(mutex, &dev->lock)
		max_vfs = dev->config.sriov.max_vfs;

	config_group_put(&dev->group);

	return max_vfs;
}
#endif

int __init xe_configfs_init(void)
{
	int ret;

	config_group_init(&xe_configfs.su_group);
	mutex_init(&xe_configfs.su_mutex);
	ret = configfs_register_subsystem(&xe_configfs);
	if (ret) {
		mutex_destroy(&xe_configfs.su_mutex);
		return ret;
	}

	return 0;
}

void xe_configfs_exit(void)
{
	configfs_unregister_subsystem(&xe_configfs);
	mutex_destroy(&xe_configfs.su_mutex);
}