xref: /linux/drivers/gpu/drm/xe/xe_configfs.c (revision 68a052239fc4b351e961f698b824f7654a346091)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2025 Intel Corporation
4  */
5 
6 #include <linux/bitops.h>
7 #include <linux/ctype.h>
8 #include <linux/configfs.h>
9 #include <linux/cleanup.h>
10 #include <linux/find.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/string.h>
15 
16 #include "instructions/xe_mi_commands.h"
17 #include "xe_configfs.h"
18 #include "xe_hw_engine_types.h"
19 #include "xe_module.h"
20 #include "xe_pci_types.h"
21 
22 /**
23  * DOC: Xe Configfs
24  *
25  * Overview
26  * ========
27  *
28  * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
29  * configfs subsystem called ``xe`` that creates a directory in the mounted
30  * configfs directory. The user can create devices under this directory and
31  * configure them as necessary. See Documentation/filesystems/configfs.rst for
32  * more information about how configfs works.
33  *
34  * Create devices
35  * ==============
36  *
37  * To create a device, the ``xe`` module should already be loaded, but some
38  * attributes can only be set before binding the device. It can be accomplished
39  * by blocking the driver autoprobe::
40  *
41  *	# echo 0 > /sys/bus/pci/drivers_autoprobe
42  *	# modprobe xe
43  *
44  * In order to create a device, the user has to create a directory inside ``xe``::
45  *
46  *	# mkdir /sys/kernel/config/xe/0000:03:00.0/
47  *
48  * Every device created is populated by the driver with entries that can be
49  * used to configure it::
50  *
51  *	/sys/kernel/config/xe/
52  *	├── 0000:00:02.0
53  *	│   └── ...
54  *	├── 0000:00:02.1
55  *	│   └── ...
56  *	:
57  *	└── 0000:03:00.0
58  *	    ├── survivability_mode
59  *	    ├── engines_allowed
60  *	    └── enable_psmi
61  *
62  * After configuring the attributes as per next section, the device can be
63  * probed with::
64  *
65  *	# echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind
66  *	# # or
67  *	# echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
68  *
69  * Configure Attributes
70  * ====================
71  *
72  * Survivability mode:
73  * -------------------
74  *
75  * Enable survivability mode on supported cards. This setting only takes
76  * effect when probing the device. Example to enable it::
77  *
78  *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
79  *
80  * This attribute can only be set before binding to the device.
81  *
82  * Allowed engines:
83  * ----------------
84  *
85  * Allow only a set of engine(s) to be available, disabling the other engines
86  * even if they are available in hardware. This is applied after HW fuses are
87  * considered on each tile. Examples:
88  *
89  * Allow only one render and one copy engines, nothing else::
90  *
91  *	# echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
92  *
93  * Allow only compute engines and first copy engine::
94  *
95  *	# echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
96  *
97  * Note that the engine names are the per-GT hardware names. On multi-tile
98  * platforms, writing ``rcs0,bcs0`` to this file would allow the first render
99  * and copy engines on each tile.
100  *
101  * The requested configuration may not be supported by the platform and driver
102  * may fail to probe. For example: if at least one copy engine is expected to be
103  * available for migrations, but it's disabled. This is intended for debugging
104  * purposes only.
105  *
106  * This attribute can only be set before binding to the device.
107  *
108  * PSMI
109  * ----
110  *
111  * Enable extra debugging capabilities to trace engine execution. Only useful
112  * during early platform enabling and requires additional hardware connected.
 * Once it's enabled, additional WAs are added and runtime configuration is
114  * done via debugfs. Example to enable it::
115  *
116  *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/enable_psmi
117  *
118  * This attribute can only be set before binding to the device.
119  *
120  * Context restore BB
121  * ------------------
122  *
123  * Allow to execute a batch buffer during any context switches. When the
124  * GPU is restoring the context, it executes additional commands. It's useful
125  * for testing additional workarounds and validating certain HW behaviors: it's
126  * not intended for normal execution and will taint the kernel with TAINT_TEST
127  * when used.
128  *
129  * Currently this is implemented only for post and mid context restore.
130  * Examples:
131  *
132  * #. Execute a LRI command to write 0xDEADBEEF to register 0x4f10 after the
133  *    normal context restore::
134  *
135  *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
136  *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
137  *
138  * #. Execute a LRI command to write 0xDEADBEEF to register 0x4f10 at the
139  *    beginning of the context restore::
140  *
141  *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
142  *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_mid_bb
 *
144  * #. Load certain values in a couple of registers (it can be used as a simpler
 *    alternative to the `cmd` action)::
146  *
147  *	# cat > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb <<EOF
148  *	rcs reg 4F100 DEADBEEF
149  *	rcs reg 4F104 FFFFFFFF
150  *	EOF
151  *
152  *    .. note::
153  *
154  *       When using multiple lines, make sure to use a command that is
155  *       implemented with a single write syscall, like HEREDOC.
156  *
157  * These attributes can only be set before binding to the device.
158  *
159  * Remove devices
160  * ==============
161  *
162  * The created device directories can be removed using ``rmdir``::
163  *
164  *	# rmdir /sys/kernel/config/xe/0000:03:00.0/
165  */
166 
/* Similar to struct xe_bb, but not tied to HW (yet) */
struct wa_bb {
	u32 *cs; /* dword buffer; NULL during the sizing pass (see wa_bb_store()) */
	u32 len; /* in dwords */
};
172 
/*
 * Per-device configfs group plus the attribute values it carries. Allocated
 * in xe_config_make_device_group() and freed by xe_config_device_release().
 */
struct xe_config_group_device {
	struct config_group group;

	/* User-configured attribute values, consumed by the driver at probe */
	struct xe_config_device {
		u64 engines_allowed;
		struct wa_bb ctx_restore_post_bb[XE_ENGINE_CLASS_MAX];
		struct wa_bb ctx_restore_mid_bb[XE_ENGINE_CLASS_MAX];
		bool survivability_mode;
		bool enable_psmi;
	} config;

	/* protects attributes */
	struct mutex lock;
	/* matching descriptor */
	const struct xe_device_desc *desc;
};
189 
/* Attribute values used until the user overrides them: all engines allowed */
static const struct xe_config_device device_defaults = {
	.engines_allowed = U64_MAX,
	.survivability_mode = false,
	.enable_psmi = false,
};

/* Reset @config to the defaults (the wa_bb arrays become empty/NULL) */
static void set_device_defaults(struct xe_config_device *config)
{
	*config = device_defaults;
}
200 
/* Mapping between a user-visible engine class name and the HW class/mask */
struct engine_info {
	const char *cls;		/* per-GT engine class name, e.g. "rcs" */
	u64 mask;			/* mask covering all instances of the class */
	enum xe_engine_class engine_class;
};

/* Some helpful macros to aid on the sizing of buffer allocation when parsing */
#define MAX_ENGINE_CLASS_CHARS 5
#define MAX_ENGINE_INSTANCE_CHARS 2

static const struct engine_info engine_info[] = {
	{ .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK, .engine_class = XE_ENGINE_CLASS_RENDER },
	{ .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK, .engine_class = XE_ENGINE_CLASS_COPY },
	{ .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_DECODE },
	{ .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_ENHANCE },
	{ .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK, .engine_class = XE_ENGINE_CLASS_COMPUTE },
	{ .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK, .engine_class = XE_ENGINE_CLASS_OTHER },
};
219 
/* Convert a configfs item to its containing xe_config_group_device */
static struct xe_config_group_device *to_xe_config_group_device(struct config_item *item)
{
	return container_of(to_config_group(item), struct xe_config_group_device, group);
}
224 
/* Convert a configfs item straight to the embedded attribute storage */
static struct xe_config_device *to_xe_config_device(struct config_item *item)
{
	return &to_xe_config_group_device(item)->config;
}
229 
230 static bool is_bound(struct xe_config_group_device *dev)
231 {
232 	unsigned int domain, bus, slot, function;
233 	struct pci_dev *pdev;
234 	const char *name;
235 	bool ret;
236 
237 	lockdep_assert_held(&dev->lock);
238 
239 	name = dev->group.cg_item.ci_name;
240 	if (sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function) != 4)
241 		return false;
242 
243 	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
244 	if (!pdev)
245 		return false;
246 
247 	ret = pci_get_drvdata(pdev);
248 	pci_dev_put(pdev);
249 
250 	if (ret)
251 		pci_dbg(pdev, "Already bound to driver\n");
252 
253 	return ret;
254 }
255 
256 static ssize_t survivability_mode_show(struct config_item *item, char *page)
257 {
258 	struct xe_config_device *dev = to_xe_config_device(item);
259 
260 	return sprintf(page, "%d\n", dev->survivability_mode);
261 }
262 
/*
 * Parse a boolean from @page and record it as the survivability_mode
 * attribute. Rejected with -EBUSY once the device is bound, since the
 * setting only takes effect at probe time.
 */
static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	bool survivability_mode;
	int ret;

	ret = kstrtobool(page, &survivability_mode);
	if (ret)
		return ret;

	/* Scoped lock: automatically released on every return path below */
	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.survivability_mode = survivability_mode;

	return len;
}
281 
/*
 * Print the allowed-engine mask using the same syntax accepted by
 * engines_allowed_store(): a "<cls>*" line when a whole class is allowed,
 * otherwise one "<cls><instance>" line per allowed engine.
 */
static ssize_t engines_allowed_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);
	char *p = page;

	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		u64 mask = engine_info[i].mask;

		if ((dev->engines_allowed & mask) == mask) {
			/* Entire class allowed: collapse to a wildcard */
			p += sprintf(p, "%s*\n", engine_info[i].cls);
		} else if (mask & dev->engines_allowed) {
			/* bit0 = first bit of this class; bit - bit0 recovers the instance */
			u16 bit0 = __ffs64(mask), bit;

			mask &= dev->engines_allowed;

			for_each_set_bit(bit, (const unsigned long *)&mask, 64)
				p += sprintf(p, "%s%u\n", engine_info[i].cls,
					     bit - bit0);
		}
	}

	return p - page;
}
305 
/*
 * Lookup engine_info. If @mask is not NULL, reduce the mask according to the
 * instance in @pattern.
 *
 * Examples of inputs:
 * - lookup_engine_info("rcs0", &mask): return "rcs" entry from @engine_info and
 *   mask == BIT_ULL(XE_HW_ENGINE_RCS0)
 * - lookup_engine_info("rcs*", &mask): return "rcs" entry from @engine_info and
 *   mask == XE_HW_ENGINE_RCS_MASK
 * - lookup_engine_info("rcs", NULL): return "rcs" entry from @engine_info
 */
static const struct engine_info *lookup_engine_info(const char *pattern, u64 *mask)
{
	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		u8 instance;
		u16 bit;

		if (!str_has_prefix(pattern, engine_info[i].cls))
			continue;

		pattern += strlen(engine_info[i].cls);
		/* A bare class name is only accepted when no mask is wanted */
		if (!mask && !*pattern)
			return &engine_info[i];

		if (!strcmp(pattern, "*")) {
			*mask = engine_info[i].mask;
			return &engine_info[i];
		}

		/* Otherwise the suffix must be a decimal instance number */
		if (kstrtou8(pattern, 10, &instance))
			return NULL;

		/* Reject instances beyond the last bit of this class' mask */
		bit = __ffs64(engine_info[i].mask) + instance;
		if (bit >= fls64(engine_info[i].mask))
			return NULL;

		*mask = BIT_ULL(bit);
		return &engine_info[i];
	}

	return NULL;
}
348 
349 static int parse_engine(const char *s, const char *end_chars, u64 *mask,
350 			const struct engine_info **pinfo)
351 {
352 	char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
353 	const struct engine_info *info;
354 	size_t len;
355 
356 	len = strcspn(s, end_chars);
357 	if (len >= sizeof(buf))
358 		return -EINVAL;
359 
360 	memcpy(buf, s, len);
361 	buf[len] = '\0';
362 
363 	info = lookup_engine_info(buf, mask);
364 	if (!info)
365 		return -ENOENT;
366 
367 	if (pinfo)
368 		*pinfo = info;
369 
370 	return len;
371 }
372 
/*
 * Parse a list of engine patterns ("rcs0", "ccs*", ...) separated by ','
 * or '\n' and accumulate them into the allowed-engine bitmask. The whole
 * write must parse cleanly; rejected with -EBUSY once the device is bound.
 */
static ssize_t engines_allowed_store(struct config_item *item, const char *page,
				     size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	ssize_t patternlen, p;
	u64 mask, val = 0;

	/* +1 skips the ','/'\n' separator after each pattern */
	for (p = 0; p < len; p += patternlen + 1) {
		patternlen = parse_engine(page + p, ",\n", &mask, NULL);
		if (patternlen < 0)
			return -EINVAL;

		val |= mask;
	}

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.engines_allowed = val;

	return len;
}
396 
397 static ssize_t enable_psmi_show(struct config_item *item, char *page)
398 {
399 	struct xe_config_device *dev = to_xe_config_device(item);
400 
401 	return sprintf(page, "%d\n", dev->enable_psmi);
402 }
403 
/*
 * Parse a boolean from @page and record whether PSMI support should be
 * enabled at probe time. Rejected with -EBUSY once the device is bound.
 */
static ssize_t enable_psmi_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	bool val;
	int ret;

	ret = kstrtobool(page, &val);
	if (ret)
		return ret;

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.enable_psmi = val;

	return len;
}
422 
/*
 * Advance the output cursor @p by @len bytes, optionally copying @append.
 *
 * @dereference: callers pass the destination buffer pointer itself here, so
 *               a NULL buffer selects a pure sizing pass with no bounds
 *               check or copy
 * @p: cursor into the output buffer, always advanced by @len
 * @append: bytes to copy at the cursor, or NULL when the caller already
 *          wrote them (e.g. via snprintf)
 * @max_size: remaining space, decremented when actually writing
 *
 * Return: false if the write would overflow @max_size, true otherwise.
 */
static bool wa_bb_read_advance(bool dereference, char **p,
			       const char *append, size_t len,
			       size_t *max_size)
{
	if (dereference) {
		if (len >= *max_size)
			return false;
		*max_size -= len;
		if (append)
			memcpy(*p, append, len);
	}

	*p += len;

	return true;
}
439 
/*
 * Format the wa_bb of each engine class into @data, one line per non-empty
 * class: "<cls>: xxxxxxxx xxxxxxxx ...". When @data is NULL only the
 * required size is computed (wa_bb_read_advance() skips checks and copies).
 *
 * Return: bytes written (or, when sizing, bytes needed including the
 * trailing '\0'), -ENOBUFS if @sz is too small.
 */
static ssize_t wa_bb_show(struct xe_config_group_device *dev,
			  struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
			  char *data, size_t sz)
{
	char *p = data;

	guard(mutex)(&dev->lock);

	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		enum xe_engine_class ec = engine_info[i].engine_class;
		size_t len;

		if (!wa_bb[ec].len)
			continue;

		len = snprintf(p, sz, "%s:", engine_info[i].cls);
		if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
			return -ENOBUFS;

		for (size_t j = 0; j < wa_bb[ec].len; j++) {
			len = snprintf(p, sz, " %08x", wa_bb[ec].cs[j]);
			if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
				return -ENOBUFS;
		}

		if (!wa_bb_read_advance(data, &p, "\n", 1, &sz))
			return -ENOBUFS;
	}

	if (!wa_bb_read_advance(data, &p, "", 1, &sz))
		return -ENOBUFS;

	/* Reserve one more to match check for '\0' */
	if (!data)
		p++;

	return p - data;
}
478 
/* Show the mid context restore bb; page buffers are one page (4K) */
static ssize_t ctx_restore_mid_bb_show(struct config_item *item, char *page)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_show(dev, dev->config.ctx_restore_mid_bb, page, SZ_4K);
}
485 
/* Show the post context restore bb; page buffers are one page (4K) */
static ssize_t ctx_restore_post_bb_show(struct config_item *item, char *page)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_show(dev, dev->config.ctx_restore_post_bb, page, SZ_4K);
}
492 
493 static void wa_bb_append(struct wa_bb *wa_bb, u32 val)
494 {
495 	if (wa_bb->cs)
496 		wa_bb->cs[wa_bb->len] = val;
497 
498 	wa_bb->len++;
499 }
500 
501 static ssize_t parse_hex(const char *line, u32 *pval)
502 {
503 	char numstr[12];
504 	const char *p;
505 	ssize_t numlen;
506 
507 	p = line + strspn(line, " \t");
508 	if (!*p || *p == '\n')
509 		return 0;
510 
511 	numlen = strcspn(p, " \t\n");
512 	if (!numlen || numlen >= sizeof(numstr) - 1)
513 		return -EINVAL;
514 
515 	memcpy(numstr, p, numlen);
516 	numstr[numlen] = '\0';
517 	p += numlen;
518 
519 	if (kstrtou32(numstr, 16, pval))
520 		return -EINVAL;
521 
522 	return p - line;
523 }
524 
/*
 * Parse lines with the format
 *
 *	<engine-class> cmd <u32> <u32...>
 *	<engine-class> reg <u32_addr> <u32_val>
 *
 * and optionally save them if @wa_bb[i].cs is non-NULL.
 *
 * Return the number of dwords parsed.
 */
static ssize_t parse_wa_bb_lines(const char *lines,
				 struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX])
{
	ssize_t dwords = 0, ret;
	const char *p;

	for (p = lines; *p; p++) {
		const struct engine_info *info = NULL;
		u32 val, val2;

		/* Also allow empty lines */
		p += strspn(p, " \t\n");
		if (!*p)
			break;

		ret = parse_engine(p, " \t\n", NULL, &info);
		if (ret < 0)
			return ret;

		p += ret;
		p += strspn(p, " \t");

		if (str_has_prefix(p, "cmd")) {
			/* "cmd": append raw dwords until the end of the line */
			for (p += strlen("cmd"); *p;) {
				ret = parse_hex(p, &val);
				if (ret < 0)
					return -EINVAL;
				if (!ret)
					break;

				p += ret;
				dwords++;
				wa_bb_append(&wa_bb[info->engine_class], val);
			}
		} else if (str_has_prefix(p, "reg")) {
			/* "reg <addr> <val>": emitted as a single-register LRI */
			p += strlen("reg");
			ret = parse_hex(p, &val);
			if (ret <= 0)
				return -EINVAL;

			p += ret;
			ret = parse_hex(p, &val2);
			if (ret <= 0)
				return -EINVAL;

			p += ret;
			dwords += 3;
			wa_bb_append(&wa_bb[info->engine_class],
				     MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1));
			wa_bb_append(&wa_bb[info->engine_class], val);
			wa_bb_append(&wa_bb[info->engine_class], val2);
		} else {
			return -EINVAL;
		}
	}

	return dwords;
}
593 
/*
 * Common store path for both ctx_restore_*_bb attributes. Two passes over
 * the input: first with NULL cs pointers just to count dwords, then - after
 * allocating one contiguous u32 buffer and pointing each class' cs at its
 * slice - a second pass that fills in the values.
 */
static ssize_t wa_bb_store(struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
			   struct xe_config_group_device *dev,
			   const char *page, size_t len)
{
	/* tmp_wa_bb must match wa_bb's size */
	struct wa_bb tmp_wa_bb[XE_ENGINE_CLASS_MAX] = { };
	ssize_t count, class;
	u32 *tmp;

	/* 1. Count dwords - wa_bb[i].cs is NULL for all classes */
	count = parse_wa_bb_lines(page, tmp_wa_bb);
	if (count < 0)
		return count;

	guard(mutex)(&dev->lock);

	if (is_bound(dev))
		return -EBUSY;

	/*
	 * 2. Allocate a u32 array and set the pointers to the right positions
	 * according to the length of each class' wa_bb
	 */
	tmp = krealloc(wa_bb[0].cs, count * sizeof(u32), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* Empty input: krealloc() already released the old buffer, forget it */
	if (!count) {
		memset(wa_bb, 0, sizeof(tmp_wa_bb));
		return len;
	}

	for (class = 0, count = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		tmp_wa_bb[class].cs = tmp + count;
		count += tmp_wa_bb[class].len;
		tmp_wa_bb[class].len = 0;
	}

	/* 3. Parse wa_bb lines again, this time saving the values */
	count = parse_wa_bb_lines(page, tmp_wa_bb);
	if (count < 0)
		return count;
	/*
	 * NOTE(review): the second parse runs on the same input as step 1, so
	 * it should never fail here; if it somehow could, wa_bb[*].cs would
	 * still reference the buffer krealloc() may have moved - confirm.
	 */

	memcpy(wa_bb, tmp_wa_bb, sizeof(tmp_wa_bb));

	return len;
}
641 
/* Store the mid context restore bb (see wa_bb_store() for the format) */
static ssize_t ctx_restore_mid_bb_store(struct config_item *item,
					const char *data, size_t sz)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_store(dev->config.ctx_restore_mid_bb, dev, data, sz);
}
649 
/* Store the post context restore bb (see wa_bb_store() for the format) */
static ssize_t ctx_restore_post_bb_store(struct config_item *item,
					 const char *data, size_t sz)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_store(dev->config.ctx_restore_post_bb, dev, data, sz);
}
657 
/*
 * CONFIGFS_ATTR(, name) ties each attribute to the name_show()/name_store()
 * pair defined above, generating attr_<name>.
 */
CONFIGFS_ATTR(, ctx_restore_mid_bb);
CONFIGFS_ATTR(, ctx_restore_post_bb);
CONFIGFS_ATTR(, enable_psmi);
CONFIGFS_ATTR(, engines_allowed);
CONFIGFS_ATTR(, survivability_mode);

/* NULL-terminated list of attributes exposed in each device directory */
static struct configfs_attribute *xe_config_device_attrs[] = {
	&attr_ctx_restore_mid_bb,
	&attr_ctx_restore_post_bb,
	&attr_enable_psmi,
	&attr_engines_allowed,
	&attr_survivability_mode,
	NULL,
};
672 
673 static void xe_config_device_release(struct config_item *item)
674 {
675 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
676 
677 	mutex_destroy(&dev->lock);
678 
679 	kfree(dev->config.ctx_restore_post_bb[0].cs);
680 	kfree(dev);
681 }
682 
/* Item operations: only release is needed; configfs handles the rest */
static struct configfs_item_operations xe_config_device_ops = {
	.release	= xe_config_device_release,
};
686 
/*
 * Hide attributes that do not apply to the matched platform:
 * survivability_mode is only shown for discrete cards from Battlemage on.
 */
static bool xe_config_device_is_visible(struct config_item *item,
					struct configfs_attribute *attr, int n)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	if (attr == &attr_survivability_mode) {
		if (!dev->desc->is_dgfx || dev->desc->platform < XE_BATTLEMAGE)
			return false;
	}

	return true;
}
699 
/* Group operations for a device directory: per-platform attribute visibility */
static struct configfs_group_operations xe_config_device_group_ops = {
	.is_visible	= xe_config_device_is_visible,
};

/* Item type describing one per-device configfs directory */
static const struct config_item_type xe_config_device_type = {
	.ct_item_ops	= &xe_config_device_ops,
	.ct_group_ops	= &xe_config_device_group_ops,
	.ct_attrs	= xe_config_device_attrs,
	.ct_owner	= THIS_MODULE,
};
710 
/*
 * Find the xe_device_desc for @pdev by matching it against the registered
 * xe PCI driver's id table. Returns NULL if the xe driver is not registered
 * or the device is not listed.
 */
static const struct xe_device_desc *xe_match_desc(struct pci_dev *pdev)
{
	struct device_driver *driver = driver_find("xe", &pci_bus_type);
	struct pci_driver *drv = to_pci_driver(driver);
	const struct pci_device_id *ids = drv ? drv->id_table : NULL;
	const struct pci_device_id *found = pci_match_id(ids, pdev);

	return found ? (const void *)found->driver_data : NULL;
}
720 
/*
 * Swap the reference held on @virtfn for a reference on its physical
 * function, dropping the @virtfn one. pci_physfn() returns the device
 * itself when it is not a VF.
 */
static struct pci_dev *get_physfn_instead(struct pci_dev *virtfn)
{
	struct pci_dev *physfn = pci_physfn(virtfn);

	pci_dev_get(physfn);
	pci_dev_put(virtfn);
	return physfn;
}
729 
/*
 * mkdir callback: create a device group named after a canonical PCI address
 * ("dddd:bb:ss.f"). The directory may also name a not-yet-created VF, in
 * which case the lookup falls back to its PF, which must support SR-IOV.
 * Only devices supported by the xe driver are accepted.
 */
static struct config_group *xe_config_make_device_group(struct config_group *group,
							const char *name)
{
	unsigned int domain, bus, slot, function;
	struct xe_config_group_device *dev;
	const struct xe_device_desc *match;
	struct pci_dev *pdev;
	char canonical[16];
	int vfnumber = 0;
	int ret;

	ret = sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function);
	if (ret != 4)
		return ERR_PTR(-EINVAL);

	/* Re-format and compare to reject non-canonical spellings ("0:3:0.0") */
	ret = scnprintf(canonical, sizeof(canonical), "%04x:%02x:%02x.%d", domain, bus,
			PCI_SLOT(PCI_DEVFN(slot, function)),
			PCI_FUNC(PCI_DEVFN(slot, function)));
	if (ret != 12 || strcmp(name, canonical))
		return ERR_PTR(-EINVAL);

	/* Fall back to function 0 / slot 0 so a missing VF resolves to its PF */
	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
	if (!pdev && function)
		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, 0));
	if (!pdev && slot)
		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(0, 0));
	if (!pdev)
		return ERR_PTR(-ENODEV);

	if (PCI_DEVFN(slot, function) != pdev->devfn) {
		/* Name didn't match the found device: treat it as a VF of this PF */
		pdev = get_physfn_instead(pdev);
		vfnumber = PCI_DEVFN(slot, function) - pdev->devfn;
		if (!dev_is_pf(&pdev->dev) || vfnumber > pci_sriov_get_totalvfs(pdev)) {
			pci_dev_put(pdev);
			return ERR_PTR(-ENODEV);
		}
	}

	match = xe_match_desc(pdev);
	if (match && vfnumber && !match->has_sriov) {
		pci_info(pdev, "xe driver does not support VFs on this device\n");
		match = NULL;
	} else if (!match) {
		pci_info(pdev, "xe driver does not support configuration of this device\n");
	}

	pci_dev_put(pdev);

	if (!match)
		return ERR_PTR(-ENOENT);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->desc = match;
	set_device_defaults(&dev->config);

	config_group_init_type_name(&dev->group, name, &xe_config_device_type);

	mutex_init(&dev->lock);

	return &dev->group;
}
794 
/* Top-level group operations: mkdir under /sys/kernel/config/xe creates devices */
static struct configfs_group_operations xe_config_group_ops = {
	.make_group	= xe_config_make_device_group,
};

static const struct config_item_type xe_configfs_type = {
	.ct_group_ops	= &xe_config_group_ops,
	.ct_owner	= THIS_MODULE,
};

/* The "xe" configfs subsystem, registered in xe_configfs_init() */
static struct configfs_subsystem xe_configfs = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "xe",
			.ci_type = &xe_configfs_type,
		},
	},
};
812 
/*
 * Find the configfs device group matching @pdev's PCI address. On success a
 * reference on the group item is held; the caller must drop it with
 * config_group_put(). Returns NULL when no group exists for this device.
 */
static struct xe_config_group_device *find_xe_config_group_device(struct pci_dev *pdev)
{
	struct config_item *item;

	mutex_lock(&xe_configfs.su_mutex);
	item = config_group_find_item(&xe_configfs.su_group, pci_name(pdev));
	mutex_unlock(&xe_configfs.su_mutex);

	if (!item)
		return NULL;

	return to_xe_config_group_device(item);
}
826 
/* Log every scalar attribute of @dev that differs from the driver default */
static void dump_custom_dev_config(struct pci_dev *pdev,
				   struct xe_config_group_device *dev)
{
#define PRI_CUSTOM_ATTR(fmt_, attr_) do { \
		if (dev->config.attr_ != device_defaults.attr_) \
			pci_info(pdev, "configfs: " __stringify(attr_) " = " fmt_ "\n", \
				 dev->config.attr_); \
	} while (0)

	PRI_CUSTOM_ATTR("%llx", engines_allowed);
	PRI_CUSTOM_ATTR("%d", enable_psmi);
	PRI_CUSTOM_ATTR("%d", survivability_mode);

#undef PRI_CUSTOM_ATTR
}
842 
/**
 * xe_configfs_check_device() - Test if device was configured by configfs
 * @pdev: the &pci_dev device to test
 *
 * Try to find the configfs group that belongs to the specified pci device
 * and print a diagnostic message if different than the default value.
 */
void xe_configfs_check_device(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);

	if (!dev)
		return;

	/* memcmp here is safe as both are zero-initialized */
	if (memcmp(&dev->config, &device_defaults, sizeof(dev->config))) {
		pci_info(pdev, "Found custom settings in configfs\n");
		dump_custom_dev_config(pdev, dev);
	}

	/* Drop the reference taken by find_xe_config_group_device() */
	config_group_put(&dev->group);
}
865 
/**
 * xe_configfs_get_survivability_mode - get configfs survivability mode attribute
 * @pdev: pci device
 *
 * Return: survivability_mode attribute in configfs, or the driver default
 * when no configfs group exists for @pdev
 */
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	bool mode;

	if (!dev)
		return device_defaults.survivability_mode;

	mode = dev->config.survivability_mode;
	config_group_put(&dev->group);

	return mode;
}
885 
/**
 * xe_configfs_get_engines_allowed - get engine allowed mask from configfs
 * @pdev: pci device
 *
 * Return: engine mask with allowed engines set in configfs, or the default
 * (all engines) when no configfs group exists for @pdev
 */
u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u64 engines_allowed;

	if (!dev)
		return device_defaults.engines_allowed;

	engines_allowed = dev->config.engines_allowed;
	config_group_put(&dev->group);

	return engines_allowed;
}
905 
/**
 * xe_configfs_get_psmi_enabled - get configfs enable_psmi setting
 * @pdev: pci device
 *
 * Return: enable_psmi setting in configfs, false when no configfs group
 * exists for @pdev
 */
bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	bool ret;

	if (!dev)
		return false;

	ret = dev->config.enable_psmi;
	config_group_put(&dev->group);

	return ret;
}
925 
/**
 * xe_configfs_get_ctx_restore_mid_bb - get configfs ctx_restore_mid_bb setting
 * @pdev: pci device
 * @class: hw engine class
 * @cs: optional pointer to receive the bb - only valid during probe
 *
 * Return: Number of dwords used in the mid_ctx_restore setting in configfs
 */
u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev,
				       enum xe_engine_class class,
				       const u32 **cs)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u32 len;

	if (!dev)
		return 0;

	if (cs)
		*cs = dev->config.ctx_restore_mid_bb[class].cs;

	len = dev->config.ctx_restore_mid_bb[class].len;
	config_group_put(&dev->group);

	return len;
}
952 
953 /**
954  * xe_configfs_get_ctx_restore_post_bb - get configfs ctx_restore_post_bb setting
955  * @pdev: pci device
956  * @class: hw engine class
957  * @cs: pointer to the bb to use - only valid during probe
958  *
959  * Return: Number of dwords used in the post_ctx_restore setting in configfs
960  */
961 u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
962 					enum xe_engine_class class,
963 					const u32 **cs)
964 {
965 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
966 	u32 len;
967 
968 	if (!dev)
969 		return 0;
970 
971 	*cs = dev->config.ctx_restore_post_bb[class].cs;
972 	len = dev->config.ctx_restore_post_bb[class].len;
973 	config_group_put(&dev->group);
974 
975 	return len;
976 }
977 
/*
 * Register the "xe" configfs subsystem. On registration failure the mutex
 * is destroyed again so nothing is left half-initialized.
 */
int __init xe_configfs_init(void)
{
	int ret;

	config_group_init(&xe_configfs.su_group);
	mutex_init(&xe_configfs.su_mutex);
	ret = configfs_register_subsystem(&xe_configfs);
	if (ret) {
		mutex_destroy(&xe_configfs.su_mutex);
		return ret;
	}

	return 0;
}
992 
/* Unregister the "xe" configfs subsystem; mirror of xe_configfs_init() */
void xe_configfs_exit(void)
{
	configfs_unregister_subsystem(&xe_configfs);
	mutex_destroy(&xe_configfs.su_mutex);
}
998