// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/configfs.h>
#include <linux/cleanup.h>
#include <linux/find.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>

#include "instructions/xe_mi_commands.h"
#include "xe_configfs.h"
#include "xe_gt_types.h"
#include "xe_hw_engine_types.h"
#include "xe_module.h"
#include "xe_pci_types.h"
#include "xe_sriov_types.h"

/**
 * DOC: Xe Configfs
 *
 * Overview
 * ========
 *
 * Configfs is a filesystem-based manager of kernel objects. Xe KMD registers a
 * configfs subsystem called ``xe`` that creates a directory in the mounted
 * configfs directory. The user can create devices under this directory and
 * configure them as necessary. See Documentation/filesystems/configfs.rst for
 * more information about how configfs works.
 *
 * Create devices
 * ==============
 *
 * To create a device, the ``xe`` module should already be loaded, but some
 * attributes can only be set before binding the device. This can be
 * accomplished by blocking the driver autoprobe::
 *
 *	# echo 0 > /sys/bus/pci/drivers_autoprobe
 *	# modprobe xe
 *
 * In order to create a device, the user has to create a directory inside ``xe``::
 *
 *	# mkdir /sys/kernel/config/xe/0000:03:00.0/
 *
 * Every device created is populated by the driver with entries that can be
 * used to configure it::
 *
 *	/sys/kernel/config/xe/
 *	├── 0000:00:02.0
 *	│   └── ...
 *	├── 0000:00:02.1
 *	│   └── ...
 *	:
 *	└── 0000:03:00.0
 *	    ├── survivability_mode
 *	    ├── gt_types_allowed
 *	    ├── engines_allowed
 *	    └── enable_psmi
 *
 * After configuring the attributes as described in the next section, the
 * device can be probed with::
 *
 *	# echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind
 *	# # or
 *	# echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
 *
 * Configure Attributes
 * ====================
 *
 * Survivability mode:
 * -------------------
 *
 * Enable survivability mode on supported cards. This setting only takes
 * effect when probing the device. Example to enable it::
 *
 *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
 *
 * This attribute can only be set before binding to the device.
 *
 * Allowed GT types:
 * -----------------
 *
 * Allow only specific types of GTs to be detected and initialized by the
 * driver.  Any combination of GT types can be enabled/disabled, although
 * some settings will cause the device to fail to probe.
 *
 * Writes support both comma- and newline-separated input formats. Reads
 * always return one GT type per line. "primary" and "media" are the
 * GT type names supported by this interface.
 *
 * This attribute can only be set before binding to the device.
 *
 * Examples:
 *
 * Allow both primary and media GTs to be initialized and used.  This matches
 * the driver's default behavior::
 *
 *	# echo 'primary,media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *
 * Allow only the primary GT of each tile to be initialized and used,
 * effectively disabling the media GT if it exists on the platform::
 *
 *	# echo 'primary' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *
 * Allow only the media GT of each tile to be initialized and used,
 * effectively disabling the primary GT.  **This configuration will cause
 * device probe failure on all current platforms, but may be allowed on
 * igpu platforms in the future**::
 *
 *	# echo 'media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *
 * Disable all GTs.  Only other GPU IP (such as display) is potentially usable.
 * **This configuration will cause device probe failure on all current
 * platforms, but may be allowed on igpu platforms in the future**::
 *
 *	# echo '' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *
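 * Reading the attribute back returns the currently allowed GT types, one
 * per line. For example, with the default configuration::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *	primary
 *	media
 *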
 * Allowed engines:
 * ----------------
 *
 * Allow only a set of engines to be available, disabling the other engines
 * even if they are available in hardware. This is applied after HW fuses are
 * considered on each tile. Examples:
 *
 * Allow only one render and one copy engine, nothing else::
 *
 *	# echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *
 * Allow only the compute engines and the first copy engine::
 *
 *	# echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *
 * Note that the engine names are the per-GT hardware names. On multi-tile
 * platforms, writing ``rcs0,bcs0`` to this file would allow the first render
 * and copy engines on each tile.
 *
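 * Reading the attribute back lists the allowed engines, one per line, using
 * the ``*`` wildcard when all instances of a class are allowed::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *	ccs*
 *	bcs0
 *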
 * The requested configuration may not be supported by the platform, in which
 * case the driver may fail to probe: for example, at least one copy engine is
 * expected to be available for migrations, so disabling all copy engines is
 * not supported. This is intended for debugging purposes only.
 *
 * This attribute can only be set before binding to the device.
 *
 * PSMI
 * ----
 *
 * Enable extra debugging capabilities to trace engine execution. This is only
 * useful during early platform enabling and requires additional hardware to be
 * connected. Once enabled, additional WAs are added and runtime configuration
 * is done via debugfs. Example to enable it::
 *
 *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/enable_psmi
 *
 * This attribute can only be set before binding to the device.
 *
 * Context restore BB
 * ------------------
 *
 * Allow executing a batch buffer during context switches. When the
 * GPU is restoring the context, it executes additional commands. This is
 * useful for testing additional workarounds and validating certain HW
 * behaviors: it's not intended for normal execution and will taint the
 * kernel with TAINT_TEST when used.
 *
 * The syntax allows passing raw instructions to be executed by the engine
 * in a batch buffer, or setting specific registers.
 *
 * #. Generic instruction::
 *
 *	<engine-class> cmd <instr> [[dword0] [dword1] [...]]
 *
 * #. Simple register setting::
 *
 *	<engine-class> reg <address> <value>
 *
 * Commands are saved per engine class: all instances of that class will
 * execute those commands during a context switch. The instruction, dword
 * arguments, addresses and values are in hex format, as in the examples below.
 *
 * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f100 after the
 *    normal context restore::
 *
 *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
 *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
 *
 * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f100 at the
 *    beginning of the context restore::
 *
 *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
 *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_mid_bb
 *
 * #. Load certain values in a couple of registers (this can be used as a
 *    simpler alternative to the ``cmd`` action)::
 *
 *	# cat > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb <<EOF
 *	rcs reg 4F100 DEADBEEF
 *	rcs reg 4F104 FFFFFFFF
 *	EOF
 *
 *    .. note::
 *
 *       When using multiple lines, make sure to use a command that is
 *       implemented with a single write syscall, like a shell heredoc.
 *
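 * Reading an attribute back returns the saved dwords, one line per engine
 * class, in lowercase hex. For instance, after the first ``cmd`` example
 * above::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
 *	rcs: 11000001 0004f100 deadbeef
 *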
 * Currently this is implemented only for post and mid context restore and
 * these attributes can only be set before binding to the device.
 *
 * Max SR-IOV Virtual Functions
 * ----------------------------
 *
 * This config allows limiting the number of Virtual Functions (VFs) that can
 * be managed by the Physical Function (PF) driver, where the value 0 disables
 * PF mode (no VFs).
 *
 * The default max_vfs config value is taken from the max_vfs modparam.
 *
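 * Reading the attribute back returns the current limit, or ``unlimited`` when
 * no limit is set::
 *
 *	# cat /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
 *	unlimited
 *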
 * How to enable PF with support for an unlimited (up to the HW limit)
 * number of VFs::
 *
 *	# echo unlimited > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
 *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
 *
 * How to enable PF with support for up to 3 VFs::
 *
 *	# echo 3 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
 *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
 *
 * How to disable PF mode and always run as native::
 *
 *	# echo 0 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
 *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
 *
 * This setting only takes effect when probing the device.
 *
 * Remove devices
 * ==============
 *
 * The created device directories can be removed using ``rmdir``::
 *
 *	# rmdir /sys/kernel/config/xe/0000:03:00.0/
 */

/* Similar to struct xe_bb, but not tied to HW (yet) */
struct wa_bb {
	u32 *cs;
	u32 len; /* in dwords */
};

struct xe_config_group_device {
	struct config_group group;
	struct config_group sriov;

	struct xe_config_device {
		u64 gt_types_allowed;
		u64 engines_allowed;
		struct wa_bb ctx_restore_post_bb[XE_ENGINE_CLASS_MAX];
		struct wa_bb ctx_restore_mid_bb[XE_ENGINE_CLASS_MAX];
		bool survivability_mode;
		bool enable_psmi;
		struct {
			unsigned int max_vfs;
		} sriov;
	} config;

	/* protects attributes */
	struct mutex lock;
	/* matching descriptor */
	const struct xe_device_desc *desc;
	/* tentative SR-IOV mode */
	enum xe_sriov_mode mode;
};

static const struct xe_config_device device_defaults = {
	.gt_types_allowed = U64_MAX,
	.engines_allowed = U64_MAX,
	.survivability_mode = false,
	.enable_psmi = false,
	.sriov = {
		.max_vfs = UINT_MAX,
	},
};

static void set_device_defaults(struct xe_config_device *config)
{
	*config = device_defaults;
#ifdef CONFIG_PCI_IOV
	config->sriov.max_vfs = xe_modparam.max_vfs;
#endif
}

struct engine_info {
	const char *cls;
	u64 mask;
	enum xe_engine_class engine_class;
};

/* Helpful macros to aid in sizing buffer allocations when parsing */
#define MAX_ENGINE_CLASS_CHARS 5
#define MAX_ENGINE_INSTANCE_CHARS 2

static const struct engine_info engine_info[] = {
	{ .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK, .engine_class = XE_ENGINE_CLASS_RENDER },
	{ .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK, .engine_class = XE_ENGINE_CLASS_COPY },
	{ .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_DECODE },
	{ .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_ENHANCE },
	{ .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK, .engine_class = XE_ENGINE_CLASS_COMPUTE },
	{ .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK, .engine_class = XE_ENGINE_CLASS_OTHER },
};

static const struct {
	const char *name;
	enum xe_gt_type type;
} gt_types[] = {
	{ .name = "primary", .type = XE_GT_TYPE_MAIN },
	{ .name = "media", .type = XE_GT_TYPE_MEDIA },
};

static struct xe_config_group_device *to_xe_config_group_device(struct config_item *item)
{
	return container_of(to_config_group(item), struct xe_config_group_device, group);
}

static struct xe_config_device *to_xe_config_device(struct config_item *item)
{
	return &to_xe_config_group_device(item)->config;
}

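/*
 * Check whether the PCI device named by the configfs group is already bound
 * to a driver, i.e. whether it has driver data set. Must be called with
 * dev->lock held.
 */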
static bool is_bound(struct xe_config_group_device *dev)
{
	unsigned int domain, bus, slot, function;
	struct pci_dev *pdev;
	const char *name;
	bool ret;

	lockdep_assert_held(&dev->lock);

	name = dev->group.cg_item.ci_name;
	if (sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function) != 4)
		return false;

	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
	if (!pdev)
		return false;

	ret = pci_get_drvdata(pdev);
	if (ret)
		pci_dbg(pdev, "Already bound to driver\n");

	pci_dev_put(pdev);
	return ret;
}

static ssize_t survivability_mode_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);

	return sprintf(page, "%d\n", dev->survivability_mode);
}

static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	bool survivability_mode;
	int ret;

	ret = kstrtobool(page, &survivability_mode);
	if (ret)
		return ret;

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.survivability_mode = survivability_mode;

	return len;
}

static ssize_t gt_types_allowed_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);
	char *p = page;

	for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++)
		if (dev->gt_types_allowed & BIT_ULL(gt_types[i].type))
			p += sprintf(p, "%s\n", gt_types[i].name);

	return p - page;
}

static ssize_t gt_types_allowed_store(struct config_item *item, const char *page,
				      size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	char *buf __free(kfree) = kstrdup(page, GFP_KERNEL);
	char *p = buf;
	u64 typemask = 0;

	if (!buf)
		return -ENOMEM;

	while (p) {
		char *typename = strsep(&p, ",\n");
		bool matched = false;

		if (typename[0] == '\0')
			continue;

		for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++) {
			if (strcmp(typename, gt_types[i].name) == 0) {
				typemask |= BIT_ULL(gt_types[i].type);
				matched = true;
				break;
			}
		}

		if (!matched)
			return -EINVAL;
	}

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.gt_types_allowed = typemask;

	return len;
}

static ssize_t engines_allowed_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);
	char *p = page;

	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		u64 mask = engine_info[i].mask;

		if ((dev->engines_allowed & mask) == mask) {
			p += sprintf(p, "%s*\n", engine_info[i].cls);
		} else if (mask & dev->engines_allowed) {
			u16 bit0 = __ffs64(mask), bit;

			mask &= dev->engines_allowed;

			for_each_set_bit(bit, (const unsigned long *)&mask, 64)
				p += sprintf(p, "%s%u\n", engine_info[i].cls,
					     bit - bit0);
		}
	}

	return p - page;
}

/*
 * Lookup engine_info. If @mask is not NULL, reduce the mask according to the
 * instance in @pattern.
 *
 * Examples of inputs:
 * - lookup_engine_info("rcs0", &mask): return "rcs" entry from @engine_info and
 *   mask == BIT_ULL(XE_HW_ENGINE_RCS0)
 * - lookup_engine_info("rcs*", &mask): return "rcs" entry from @engine_info and
 *   mask == XE_HW_ENGINE_RCS_MASK
 * - lookup_engine_info("rcs", NULL): return "rcs" entry from @engine_info
 */
static const struct engine_info *lookup_engine_info(const char *pattern, u64 *mask)
{
	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		u8 instance;
		u16 bit;

		if (!str_has_prefix(pattern, engine_info[i].cls))
			continue;

		pattern += strlen(engine_info[i].cls);
		if (!mask)
			return *pattern ? NULL : &engine_info[i];

		if (!strcmp(pattern, "*")) {
			*mask = engine_info[i].mask;
			return &engine_info[i];
		}

		if (kstrtou8(pattern, 10, &instance))
			return NULL;

		bit = __ffs64(engine_info[i].mask) + instance;
		if (bit >= fls64(engine_info[i].mask))
			return NULL;

		*mask = BIT_ULL(bit);
		return &engine_info[i];
	}

	return NULL;
}

static int parse_engine(const char *s, const char *end_chars, u64 *mask,
			const struct engine_info **pinfo)
{
	char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
	const struct engine_info *info;
	size_t len;

	len = strcspn(s, end_chars);
	if (len >= sizeof(buf))
		return -EINVAL;

	memcpy(buf, s, len);
	buf[len] = '\0';

	info = lookup_engine_info(buf, mask);
	if (!info)
		return -ENOENT;

	if (pinfo)
		*pinfo = info;

	return len;
}

static ssize_t engines_allowed_store(struct config_item *item, const char *page,
				     size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	ssize_t patternlen, p;
	u64 mask, val = 0;

	for (p = 0; p < len; p += patternlen + 1) {
		patternlen = parse_engine(page + p, ",\n", &mask, NULL);
		if (patternlen < 0)
			return -EINVAL;

		val |= mask;
	}

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.engines_allowed = val;

	return len;
}

static ssize_t enable_psmi_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);

	return sprintf(page, "%d\n", dev->enable_psmi);
}

static ssize_t enable_psmi_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);
	bool val;
	int ret;

	ret = kstrtobool(page, &val);
	if (ret)
		return ret;

	guard(mutex)(&dev->lock);
	if (is_bound(dev))
		return -EBUSY;

	dev->config.enable_psmi = val;

	return len;
}

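/*
 * Advance the output cursor @p by @len. When @dereference is true (an actual
 * buffer is being filled), bounds-check against @max_size and copy @append if
 * provided; when false, only account for the space that would be needed.
 */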
static bool wa_bb_read_advance(bool dereference, char **p,
			       const char *append, size_t len,
			       size_t *max_size)
{
	if (dereference) {
		if (len >= *max_size)
			return false;
		*max_size -= len;
		if (append)
			memcpy(*p, append, len);
	}

	*p += len;

	return true;
}

static ssize_t wa_bb_show(struct xe_config_group_device *dev,
			  struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
			  char *data, size_t sz)
{
	char *p = data;

	guard(mutex)(&dev->lock);

	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		enum xe_engine_class ec = engine_info[i].engine_class;
		size_t len;

		if (!wa_bb[ec].len)
			continue;

		len = snprintf(p, sz, "%s:", engine_info[i].cls);
		if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
			return -ENOBUFS;

		for (size_t j = 0; j < wa_bb[ec].len; j++) {
			len = snprintf(p, sz, " %08x", wa_bb[ec].cs[j]);
			if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
				return -ENOBUFS;
		}

		if (!wa_bb_read_advance(data, &p, "\n", 1, &sz))
			return -ENOBUFS;
	}

	if (!wa_bb_read_advance(data, &p, "", 1, &sz))
		return -ENOBUFS;

	/* Reserve one more to match check for '\0' */
	if (!data)
		p++;

	return p - data;
}

static ssize_t ctx_restore_mid_bb_show(struct config_item *item, char *page)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_show(dev, dev->config.ctx_restore_mid_bb, page, SZ_4K);
}

static ssize_t ctx_restore_post_bb_show(struct config_item *item, char *page)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_show(dev, dev->config.ctx_restore_post_bb, page, SZ_4K);
}

static void wa_bb_append(struct wa_bb *wa_bb, u32 val)
{
	if (wa_bb->cs)
		wa_bb->cs[wa_bb->len] = val;

	wa_bb->len++;
}

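/*
 * Parse the next whitespace-delimited hex token from @line into @pval.
 * Return the number of characters consumed, 0 if the line is exhausted,
 * or -EINVAL on a malformed token.
 */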
static ssize_t parse_hex(const char *line, u32 *pval)
{
	char numstr[12];
	const char *p;
	ssize_t numlen;

	p = line + strspn(line, " \t");
	if (!*p || *p == '\n')
		return 0;

	numlen = strcspn(p, " \t\n");
	if (!numlen || numlen >= sizeof(numstr) - 1)
		return -EINVAL;

	memcpy(numstr, p, numlen);
	numstr[numlen] = '\0';
	p += numlen;

	if (kstrtou32(numstr, 16, pval))
		return -EINVAL;

	return p - line;
}

/*
 * Parse lines with the format
 *
 *	<engine-class> cmd <u32> <u32...>
 *	<engine-class> reg <u32_addr> <u32_val>
 *
 * and optionally save them if @wa_bb[i].cs is non-NULL.
 *
 * Return the number of dwords parsed.
 */
static ssize_t parse_wa_bb_lines(const char *lines,
				 struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX])
{
	ssize_t dwords = 0, ret;
	const char *p;

	for (p = lines; *p; p++) {
		const struct engine_info *info = NULL;
		u32 val, val2;

		/* Also allow empty lines */
		p += strspn(p, " \t\n");
		if (!*p)
			break;

		ret = parse_engine(p, " \t\n", NULL, &info);
		if (ret < 0)
			return ret;

		p += ret;
		p += strspn(p, " \t");

		if (str_has_prefix(p, "cmd")) {
			for (p += strlen("cmd"); *p;) {
				ret = parse_hex(p, &val);
				if (ret < 0)
					return -EINVAL;
				if (!ret)
					break;

				p += ret;
				dwords++;
				wa_bb_append(&wa_bb[info->engine_class], val);
			}
		} else if (str_has_prefix(p, "reg")) {
			p += strlen("reg");
			ret = parse_hex(p, &val);
			if (ret <= 0)
				return -EINVAL;

			p += ret;
			ret = parse_hex(p, &val2);
			if (ret <= 0)
				return -EINVAL;

			p += ret;
			dwords += 3;
			wa_bb_append(&wa_bb[info->engine_class],
				     MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1));
			wa_bb_append(&wa_bb[info->engine_class], val);
			wa_bb_append(&wa_bb[info->engine_class], val2);
		} else {
			return -EINVAL;
		}
	}

	return dwords;
}

static ssize_t wa_bb_store(struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
			   struct xe_config_group_device *dev,
			   const char *page, size_t len)
{
	/* tmp_wa_bb must match wa_bb's size */
	struct wa_bb tmp_wa_bb[XE_ENGINE_CLASS_MAX] = { };
	ssize_t count, class;
	u32 *tmp;

	/* 1. Count dwords - wa_bb[i].cs is NULL for all classes */
	count = parse_wa_bb_lines(page, tmp_wa_bb);
	if (count < 0)
		return count;

	guard(mutex)(&dev->lock);

	if (is_bound(dev))
		return -EBUSY;

	/*
	 * 2. Allocate a u32 array and set the pointers to the right positions
	 * according to the length of each class' wa_bb
	 */
	tmp = krealloc(wa_bb[0].cs, count * sizeof(u32), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	if (!count) {
		memset(wa_bb, 0, sizeof(tmp_wa_bb));
		return len;
	}

	for (class = 0, count = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		tmp_wa_bb[class].cs = tmp + count;
		count += tmp_wa_bb[class].len;
		tmp_wa_bb[class].len = 0;
	}

	/* 3. Parse wa_bb lines again, this time saving the values */
	count = parse_wa_bb_lines(page, tmp_wa_bb);
	if (count < 0)
		return count;

	memcpy(wa_bb, tmp_wa_bb, sizeof(tmp_wa_bb));

	return len;
}

static ssize_t ctx_restore_mid_bb_store(struct config_item *item,
					const char *data, size_t sz)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_store(dev->config.ctx_restore_mid_bb, dev, data, sz);
}

static ssize_t ctx_restore_post_bb_store(struct config_item *item,
					 const char *data, size_t sz)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	return wa_bb_store(dev->config.ctx_restore_post_bb, dev, data, sz);
}

CONFIGFS_ATTR(, ctx_restore_mid_bb);
CONFIGFS_ATTR(, ctx_restore_post_bb);
CONFIGFS_ATTR(, enable_psmi);
CONFIGFS_ATTR(, engines_allowed);
CONFIGFS_ATTR(, gt_types_allowed);
CONFIGFS_ATTR(, survivability_mode);

static struct configfs_attribute *xe_config_device_attrs[] = {
	&attr_ctx_restore_mid_bb,
	&attr_ctx_restore_post_bb,
	&attr_enable_psmi,
	&attr_engines_allowed,
	&attr_gt_types_allowed,
	&attr_survivability_mode,
	NULL,
};

static void xe_config_device_release(struct config_item *item)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	mutex_destroy(&dev->lock);

	/* Each BB array owns a single allocation anchored at index 0 */
	kfree(dev->config.ctx_restore_mid_bb[0].cs);
	kfree(dev->config.ctx_restore_post_bb[0].cs);
	kfree(dev);
}

static struct configfs_item_operations xe_config_device_ops = {
	.release	= xe_config_device_release,
};

static bool xe_config_device_is_visible(struct config_item *item,
					struct configfs_attribute *attr, int n)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item);

	if (attr == &attr_survivability_mode) {
		if (!dev->desc->is_dgfx || dev->desc->platform < XE_BATTLEMAGE)
			return false;
	}

	return true;
}

static struct configfs_group_operations xe_config_device_group_ops = {
	.is_visible	= xe_config_device_is_visible,
};

static const struct config_item_type xe_config_device_type = {
	.ct_item_ops	= &xe_config_device_ops,
	.ct_group_ops	= &xe_config_device_group_ops,
	.ct_attrs	= xe_config_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static ssize_t sriov_max_vfs_show(struct config_item *item, char *page)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);

	guard(mutex)(&dev->lock);

	if (dev->config.sriov.max_vfs == UINT_MAX)
		return sprintf(page, "%s\n", "unlimited");
	else
		return sprintf(page, "%u\n", dev->config.sriov.max_vfs);
}

static ssize_t sriov_max_vfs_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
	unsigned int max_vfs;
	int ret;

	guard(mutex)(&dev->lock);

	if (is_bound(dev))
		return -EBUSY;

	ret = kstrtouint(page, 0, &max_vfs);
	if (ret) {
		if (!sysfs_streq(page, "unlimited"))
			return ret;
		max_vfs = UINT_MAX;
	}

	dev->config.sriov.max_vfs = max_vfs;
	return len;
}

CONFIGFS_ATTR(sriov_, max_vfs);

static struct configfs_attribute *xe_config_sriov_attrs[] = {
	&sriov_attr_max_vfs,
	NULL,
};

static bool xe_config_sriov_is_visible(struct config_item *item,
				       struct configfs_attribute *attr, int n)
{
	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);

	if (attr == &sriov_attr_max_vfs && dev->mode != XE_SRIOV_MODE_PF)
		return false;

	return true;
}

static struct configfs_group_operations xe_config_sriov_group_ops = {
	.is_visible	= xe_config_sriov_is_visible,
};

static const struct config_item_type xe_config_sriov_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &xe_config_sriov_group_ops,
	.ct_attrs	= xe_config_sriov_attrs,
};

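/*
 * Look up the xe driver's device descriptor for @pdev by matching against
 * the PCI ID table of the registered xe PCI driver.
 */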
static const struct xe_device_desc *xe_match_desc(struct pci_dev *pdev)
{
	struct device_driver *driver = driver_find("xe", &pci_bus_type);
	struct pci_driver *drv = to_pci_driver(driver);
	const struct pci_device_id *ids = drv ? drv->id_table : NULL;
	const struct pci_device_id *found = pci_match_id(ids, pdev);

	return found ? (const void *)found->driver_data : NULL;
}

static struct pci_dev *get_physfn_instead(struct pci_dev *virtfn)
{
	struct pci_dev *physfn = pci_physfn(virtfn);

	pci_dev_get(physfn);
	pci_dev_put(virtfn);
	return physfn;
}

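/*
 * Create a configfs group for a device given by its canonical BDF name.
 * Directories may also be created for VFs that do not exist yet, in which
 * case the name is validated against the parent PF.
 */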
static struct config_group *xe_config_make_device_group(struct config_group *group,
							const char *name)
{
	unsigned int domain, bus, slot, function;
	struct xe_config_group_device *dev;
	const struct xe_device_desc *match;
	enum xe_sriov_mode mode;
	struct pci_dev *pdev;
	char canonical[16];
	int vfnumber = 0;
	int ret;

	ret = sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function);
	if (ret != 4)
		return ERR_PTR(-EINVAL);

	ret = scnprintf(canonical, sizeof(canonical), "%04x:%02x:%02x.%d", domain, bus,
			PCI_SLOT(PCI_DEVFN(slot, function)),
			PCI_FUNC(PCI_DEVFN(slot, function)));
	if (ret != 12 || strcmp(name, canonical))
		return ERR_PTR(-EINVAL);

	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
	mode = pdev ? dev_is_pf(&pdev->dev) ?
		XE_SRIOV_MODE_PF : XE_SRIOV_MODE_NONE : XE_SRIOV_MODE_VF;

	if (!pdev && function)
		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, 0));
	if (!pdev && slot)
		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(0, 0));
	if (!pdev)
		return ERR_PTR(-ENODEV);

	if (PCI_DEVFN(slot, function) != pdev->devfn) {
		pdev = get_physfn_instead(pdev);
		vfnumber = PCI_DEVFN(slot, function) - pdev->devfn;
		if (!dev_is_pf(&pdev->dev) || vfnumber > pci_sriov_get_totalvfs(pdev)) {
			pci_dev_put(pdev);
			return ERR_PTR(-ENODEV);
		}
	}

	match = xe_match_desc(pdev);
	if (match && vfnumber && !match->has_sriov) {
		pci_info(pdev, "xe driver does not support VFs on this device\n");
		match = NULL;
	} else if (!match) {
		pci_info(pdev, "xe driver does not support configuration of this device\n");
	}

	pci_dev_put(pdev);

	if (!match)
		return ERR_PTR(-ENOENT);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->desc = match;
	dev->mode = match->has_sriov ? mode : XE_SRIOV_MODE_NONE;

	set_device_defaults(&dev->config);

	config_group_init_type_name(&dev->group, name, &xe_config_device_type);
	if (dev->mode != XE_SRIOV_MODE_NONE) {
		config_group_init_type_name(&dev->sriov, "sriov", &xe_config_sriov_type);
		configfs_add_default_group(&dev->sriov, &dev->group);
	}

	mutex_init(&dev->lock);

	return &dev->group;
}

static struct configfs_group_operations xe_config_group_ops = {
	.make_group	= xe_config_make_device_group,
};

static const struct config_item_type xe_configfs_type = {
	.ct_group_ops	= &xe_config_group_ops,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem xe_configfs = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "xe",
			.ci_type = &xe_configfs_type,
		},
	},
};

static struct xe_config_group_device *find_xe_config_group_device(struct pci_dev *pdev)
{
	struct config_item *item;

	mutex_lock(&xe_configfs.su_mutex);
	item = config_group_find_item(&xe_configfs.su_group, pci_name(pdev));
	mutex_unlock(&xe_configfs.su_mutex);

	if (!item)
		return NULL;

	return to_xe_config_group_device(item);
}

static void dump_custom_dev_config(struct pci_dev *pdev,
				   struct xe_config_group_device *dev)
{
#define PRI_CUSTOM_ATTR(fmt_, attr_) do { \
		if (dev->config.attr_ != device_defaults.attr_) \
			pci_info(pdev, "configfs: " __stringify(attr_) " = " fmt_ "\n", \
				 dev->config.attr_); \
	} while (0)

	PRI_CUSTOM_ATTR("%llx", gt_types_allowed);
	PRI_CUSTOM_ATTR("%llx", engines_allowed);
	PRI_CUSTOM_ATTR("%d", enable_psmi);
	PRI_CUSTOM_ATTR("%d", survivability_mode);

#undef PRI_CUSTOM_ATTR
}

/**
 * xe_configfs_check_device() - Test if device was configured by configfs
 * @pdev: the &pci_dev device to test
 *
 * Try to find the configfs group that belongs to the specified pci device
 * and print a diagnostic message if the configuration differs from the
 * default values.
 */
void xe_configfs_check_device(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);

	if (!dev)
		return;

	/* memcmp here is safe as both are zero-initialized */
	if (memcmp(&dev->config, &device_defaults, sizeof(dev->config))) {
		pci_info(pdev, "Found custom settings in configfs\n");
		dump_custom_dev_config(pdev, dev);
	}

	config_group_put(&dev->group);
}

/**
 * xe_configfs_get_survivability_mode - get configfs survivability mode attribute
 * @pdev: pci device
 *
 * Return: survivability_mode attribute in configfs
 */
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	bool mode;

	if (!dev)
		return device_defaults.survivability_mode;

	mode = dev->config.survivability_mode;
	config_group_put(&dev->group);

	return mode;
}

static u64 get_gt_types_allowed(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u64 mask;

	if (!dev)
		return device_defaults.gt_types_allowed;

	mask = dev->config.gt_types_allowed;
	config_group_put(&dev->group);

	return mask;
}

/**
 * xe_configfs_primary_gt_allowed - determine whether primary GTs are supported
 * @pdev: pci device
 *
 * Return: True if primary GTs are enabled, false if they have been disabled via
 *     configfs.
 */
bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev)
{
	return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MAIN);
}

/**
 * xe_configfs_media_gt_allowed - determine whether media GTs are supported
 * @pdev: pci device
 *
 * Return: True if the media GTs are enabled, false if they have been disabled
 *     via configfs.
 */
bool xe_configfs_media_gt_allowed(struct pci_dev *pdev)
{
	return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MEDIA);
}

/**
 * xe_configfs_get_engines_allowed - get engine allowed mask from configfs
 * @pdev: pci device
 *
 * Return: engine mask with allowed engines set in configfs
 */
u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u64 engines_allowed;

	if (!dev)
		return device_defaults.engines_allowed;

	engines_allowed = dev->config.engines_allowed;
	config_group_put(&dev->group);

	return engines_allowed;
}

/**
 * xe_configfs_get_psmi_enabled - get configfs enable_psmi setting
 * @pdev: pci device
 *
 * Return: enable_psmi setting in configfs
 */
bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	bool ret;

	if (!dev)
		return false;

	ret = dev->config.enable_psmi;
	config_group_put(&dev->group);

	return ret;
}

/**
 * xe_configfs_get_ctx_restore_mid_bb - get configfs ctx_restore_mid_bb setting
 * @pdev: pci device
 * @class: hw engine class
 * @cs: pointer to the bb to use - only valid during probe
 *
 * Return: Number of dwords used in the ctx_restore_mid_bb setting in configfs
 */
u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev,
				       enum xe_engine_class class,
				       const u32 **cs)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u32 len;

	if (!dev)
		return 0;

	if (cs)
		*cs = dev->config.ctx_restore_mid_bb[class].cs;

	len = dev->config.ctx_restore_mid_bb[class].len;
	config_group_put(&dev->group);

	return len;
}

/**
 * xe_configfs_get_ctx_restore_post_bb - get configfs ctx_restore_post_bb setting
 * @pdev: pci device
 * @class: hw engine class
 * @cs: pointer to the bb to use - only valid during probe
 *
 * Return: Number of dwords used in the ctx_restore_post_bb setting in configfs
 */
u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
					enum xe_engine_class class,
					const u32 **cs)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	u32 len;

	if (!dev)
		return 0;

	*cs = dev->config.ctx_restore_post_bb[class].cs;
	len = dev->config.ctx_restore_post_bb[class].len;
	config_group_put(&dev->group);

	return len;
}

#ifdef CONFIG_PCI_IOV
/**
 * xe_configfs_get_max_vfs() - Get number of VFs that could be managed
 * @pdev: the &pci_dev device
 *
 * Find the configfs group that belongs to the PCI device and return the
 * maximum number of Virtual Functions (VFs) that could be managed by this
 * device. If the configfs group is not present, the value of the max_vfs
 * module parameter is used.
 *
 * Return: maximum number of VFs that could be managed.
 */
unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev)
{
	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
	unsigned int max_vfs;

	if (!dev)
		return xe_modparam.max_vfs;

	scoped_guard(mutex, &dev->lock)
		max_vfs = dev->config.sriov.max_vfs;

	config_group_put(&dev->group);

	return max_vfs;
}
#endif

int __init xe_configfs_init(void)
{
	int ret;

	config_group_init(&xe_configfs.su_group);
	mutex_init(&xe_configfs.su_mutex);
	ret = configfs_register_subsystem(&xe_configfs);
	if (ret) {
		mutex_destroy(&xe_configfs.su_mutex);
		return ret;
	}

	return 0;
}

void xe_configfs_exit(void)
{
	configfs_unregister_subsystem(&xe_configfs);
	mutex_destroy(&xe_configfs.su_mutex);
}