xref: /linux/drivers/gpu/drm/xe/xe_configfs.c (revision 9e4e86a604dfd06402933467578c4b79f5412b2c)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2025 Intel Corporation
4  */
5 
6 #include <linux/bitops.h>
7 #include <linux/ctype.h>
8 #include <linux/configfs.h>
9 #include <linux/cleanup.h>
10 #include <linux/find.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/string.h>
15 
16 #include "instructions/xe_mi_commands.h"
17 #include "xe_configfs.h"
18 #include "xe_defaults.h"
19 #include "xe_gt_types.h"
20 #include "xe_hw_engine_types.h"
21 #include "xe_module.h"
22 #include "xe_pci_types.h"
23 #include "xe_sriov_types.h"
24 
25 /**
26  * DOC: Xe Configfs
27  *
28  * Overview
29  * ========
30  *
31  * Configfs is a filesystem-based manager of kernel objects. Xe KMD registers a
32  * configfs subsystem called ``xe`` that creates a directory in the mounted
33  * configfs directory. The user can create devices under this directory and
34  * configure them as necessary. See Documentation/filesystems/configfs.rst for
35  * more information about how configfs works.
36  *
37  * Create devices
38  * ==============
39  *
40  * To create a device, the ``xe`` module should already be loaded, but some
41  * attributes can only be set before binding the device. This can be
42  * accomplished by blocking the driver autoprobe::
43  *
44  *	# echo 0 > /sys/bus/pci/drivers_autoprobe
45  *	# modprobe xe
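 *
 * Autoprobe can be turned back on through the same standard sysfs knob once
 * the desired devices have been configured and bound::
 *
 *	# echo 1 > /sys/bus/pci/drivers_autoprobe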
46  *
47  * In order to create a device, the user has to create a directory inside ``xe``::
48  *
49  *	# mkdir /sys/kernel/config/xe/0000:03:00.0/
50  *
51  * Every device created is populated by the driver with entries that can be
52  * used to configure it::
53  *
54  *	/sys/kernel/config/xe/
55  *	├── 0000:00:02.0
56  *	│   └── ...
57  *	├── 0000:00:02.1
58  *	│   └── ...
59  *	:
60  *	└── 0000:03:00.0
61  *	    ├── survivability_mode
62  *	    ├── gt_types_allowed
63  *	    ├── engines_allowed
64  *	    └── enable_psmi
65  *
66  * After configuring the attributes as per the next section, the device can be
67  * probed with::
68  *
69  *	# echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind
70  *	# # or
71  *	# echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
72  *
73  * Configure Attributes
74  * ====================
75  *
76  * Survivability mode:
77  * -------------------
78  *
79  * Enable survivability mode on supported cards. This setting only takes
80  * effect when probing the device. Example to enable it::
81  *
82  *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
83  *
84  * This attribute can only be set before binding to the device.
85  *
86  * Allowed GT types:
87  * -----------------
88  *
89  * Allow only specific types of GTs to be detected and initialized by the
90  * driver.  Any combination of GT types can be enabled/disabled, although
91  * some settings will cause the device to fail to probe.
92  *
93  * Writes support both comma- and newline-separated input formats. Reads
94  * always return one GT type per line (see the read-back example below).
95  * "primary" and "media" are the GT type names supported by this interface.
96  *
97  * This attribute can only be set before binding to the device.
98  *
99  * Examples:
100  *
101  * Allow both primary and media GTs to be initialized and used.  This matches
102  * the driver's default behavior::
103  *
104  *	# echo 'primary,media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
105  *
106  * Allow only the primary GT of each tile to be initialized and used,
107  * effectively disabling the media GT if it exists on the platform::
108  *
109  *	# echo 'primary' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
110  *
111  * Allow only the media GT of each tile to be initialized and used,
112  * effectively disabling the primary GT.  **This configuration will cause
113  * device probe failure on all current platforms, but may be allowed on
114  * igpu platforms in the future**::
115  *
116  *	# echo 'media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
117  *
118  * Disable all GTs.  Only other GPU IP (such as display) is potentially usable.
119  * **This configuration will cause device probe failure on all current
120  * platforms, but may be allowed on igpu platforms in the future**::
121  *
122  *	# echo '' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
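 *
 * Reading the attribute back returns one allowed GT type per line. With the
 * driver's default configuration (both types allowed), the output would look
 * like::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
 *	primary
 *	media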
123  *
124  * Allowed engines:
125  * ----------------
126  *
127  * Allow only a set of engines to be available, disabling the other engines
128  * even if they are present in hardware. This is applied after HW fuses are
129  * considered on each tile. Examples:
130  *
131  * Allow only one render and one copy engine, nothing else::
132  *
133  *	# echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
134  *
135  * Allow only the compute engines and the first copy engine::
136  *
137  *	# echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
138  *
139  * Note that the engine names are the per-GT hardware names. On multi-tile
140  * platforms, writing ``rcs0,bcs0`` to this file would allow the first render
141  * and copy engines on each tile.
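 *
 * Reading the attribute back reports a whole class with the ``*`` wildcard
 * when all instances of that class are allowed, and individual engines
 * otherwise. For instance, after the ``ccs*,bcs0`` example above the output
 * would look like::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *	bcs0
 *	ccs*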
142  *
143  * The requested configuration may not be supported by the platform, in which
144  * case the driver may fail to probe. For example, at least one copy engine is
145  * expected to be available for migrations; disabling all of them will prevent
146  * probe. This is intended for debugging purposes only.
147  *
148  * This attribute can only be set before binding to the device.
149  *
150  * PSMI
151  * ----
152  *
153  * Enable extra debugging capabilities to trace engine execution. This is only
154  * useful during early platform enabling and requires additional hardware to be
155  * connected. Once enabled, additional WAs are added and runtime configuration
156  * is done via debugfs. Example to enable it::
157  *
158  *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/enable_psmi
159  *
160  * This attribute can only be set before binding to the device.
161  *
162  * Context restore BB
163  * ------------------
164  *
165  * Allow a batch buffer to be executed during any context switch. When the
166  * GPU is restoring the context, it executes these additional commands. This
167  * is useful for testing additional workarounds and validating certain HW
168  * behaviors: it is not intended for normal execution and will taint the
169  * kernel with TAINT_TEST when used.
170  *
171  * The syntax allows passing raw instructions to be executed by the engine in
172  * a batch buffer, or setting specific registers.
173  *
174  * #. Generic instruction::
175  *
176  *	<engine-class> cmd <instr> [[dword0] [dword1] [...]]
177  *
178  * #. Simple register setting::
179  *
180  *	<engine-class> reg <address> <value>
181  *
182  * Commands are saved per engine class: all instances of that class will execute
183  * those commands during a context switch. The instruction, dword arguments,
184  * addresses and values are given in hex format, as in the examples below.
185  *
186  * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f100 after the
187  *    normal context restore::
188  *
189  *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
190  *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
191  *
192  * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f100 at the
193  *    beginning of the context restore::
194  *
195  *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
196  *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_mid_bb
197  *
198  * #. Load certain values in a couple of registers (this can be used as a
199  *    simpler alternative to the `cmd` action)::
200  *
201  *	# cat > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb <<EOF
202  *	rcs reg 4F100 DEADBEEF
203  *	rcs reg 4F104 FFFFFFFF
204  *	EOF
205  *
206  *    .. note::
207  *
208  *       When using multiple lines, make sure to use a command that is
209  *       implemented with a single write syscall, like HEREDOC.
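 *
 * Reading the attribute back shows the saved dwords per engine class. Each
 * ``reg`` action is expanded into an LRI command, so for the example above the
 * output would look like::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
 *	rcs: 11000001 0004f100 deadbeef 11000001 0004f104 ffffffff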
210  *
211  * Currently this is implemented only for post and mid context restore and
212  * these attributes can only be set before binding to the device.
213  *
214  * Max SR-IOV Virtual Functions
215  * ----------------------------
216  *
217  * This config allows limiting the number of Virtual Functions (VFs) that can
218  * be managed by the Physical Function (PF) driver, where a value of 0 disables
219  * PF mode (no VFs).
220  *
221  * The default max_vfs config value is taken from the max_vfs modparam.
222  *
223  * How to enable PF with support for an unlimited (up to the HW limit) number of VFs::
224  *
225  *	# echo unlimited > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
226  *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
227  *
228  * How to enable PF with support for up to 3 VFs::
229  *
230  *	# echo 3 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
231  *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
232  *
233  * How to disable PF mode and always run as native::
234  *
235  *	# echo 0 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
236  *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
237  *
238  * This setting only takes effect when probing the device.
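 *
 * After the PF is bound with a non-zero limit, VFs can be enabled up to the
 * configured maximum through the standard PCI sysfs interface, for example::
 *
 *	# echo 3 > /sys/bus/pci/devices/0000:00:02.0/sriov_numvfs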
239  *
240  * Remove devices
241  * ==============
242  *
243  * The created device directories can be removed using ``rmdir``::
244  *
245  *	# rmdir /sys/kernel/config/xe/0000:03:00.0/
246  */
247 
248 /* Similar to struct xe_bb, but not tied to HW (yet) */
249 struct wa_bb {
250 	u32 *cs;
251 	u32 len; /* in dwords */
252 };
253 
254 struct xe_config_group_device {
255 	struct config_group group;
256 	struct config_group sriov;
257 
258 	struct xe_config_device {
259 		u64 gt_types_allowed;
260 		u64 engines_allowed;
261 		struct wa_bb ctx_restore_post_bb[XE_ENGINE_CLASS_MAX];
262 		struct wa_bb ctx_restore_mid_bb[XE_ENGINE_CLASS_MAX];
263 		bool survivability_mode;
264 		bool enable_psmi;
265 		struct {
266 			unsigned int max_vfs;
267 			bool admin_only_pf;
268 		} sriov;
269 	} config;
270 
271 	/* protects attributes */
272 	struct mutex lock;
273 	/* matching descriptor */
274 	const struct xe_device_desc *desc;
275 	/* tentative SR-IOV mode */
276 	enum xe_sriov_mode mode;
277 };
278 
279 static const struct xe_config_device device_defaults = {
280 	.gt_types_allowed = U64_MAX,
281 	.engines_allowed = U64_MAX,
282 	.survivability_mode = false,
283 	.enable_psmi = false,
284 	.sriov = {
285 		.max_vfs = XE_DEFAULT_MAX_VFS,
286 		.admin_only_pf = XE_DEFAULT_ADMIN_ONLY_PF,
287 	},
288 };
289 
290 static void set_device_defaults(struct xe_config_device *config)
291 {
292 	*config = device_defaults;
293 #ifdef CONFIG_PCI_IOV
294 	config->sriov.max_vfs = xe_modparam.max_vfs;
295 #endif
296 }
297 
298 struct engine_info {
299 	const char *cls;
300 	u64 mask;
301 	enum xe_engine_class engine_class;
302 };
303 
304 /* Some helpful macros to aid on the sizing of buffer allocation when parsing */
305 #define MAX_ENGINE_CLASS_CHARS 5
306 #define MAX_ENGINE_INSTANCE_CHARS 2
307 
308 static const struct engine_info engine_info[] = {
309 	{ .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK, .engine_class = XE_ENGINE_CLASS_RENDER },
310 	{ .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK, .engine_class = XE_ENGINE_CLASS_COPY },
311 	{ .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_DECODE },
312 	{ .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_ENHANCE },
313 	{ .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK, .engine_class = XE_ENGINE_CLASS_COMPUTE },
314 	{ .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK, .engine_class = XE_ENGINE_CLASS_OTHER },
315 };
316 
317 static const struct {
318 	const char *name;
319 	enum xe_gt_type type;
320 } gt_types[] = {
321 	{ .name = "primary", .type = XE_GT_TYPE_MAIN },
322 	{ .name = "media", .type = XE_GT_TYPE_MEDIA },
323 };
324 
325 static struct xe_config_group_device *to_xe_config_group_device(struct config_item *item)
326 {
327 	return container_of(to_config_group(item), struct xe_config_group_device, group);
328 }
329 
330 static struct xe_config_device *to_xe_config_device(struct config_item *item)
331 {
332 	return &to_xe_config_group_device(item)->config;
333 }
334 
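/*
 * Check if the PCI device named by this config group is currently bound to a
 * driver, in which case the configuration must not be changed anymore.
 */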
335 static bool is_bound(struct xe_config_group_device *dev)
336 {
337 	unsigned int domain, bus, slot, function;
338 	struct pci_dev *pdev;
339 	const char *name;
340 	bool ret;
341 
342 	lockdep_assert_held(&dev->lock);
343 
344 	name = dev->group.cg_item.ci_name;
345 	if (sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function) != 4)
346 		return false;
347 
348 	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
349 	if (!pdev)
350 		return false;
351 
352 	ret = pci_get_drvdata(pdev);
353 	if (ret)
354 		pci_dbg(pdev, "Already bound to driver\n");
355 
356 	pci_dev_put(pdev);
357 	return ret;
358 }
359 
360 static ssize_t survivability_mode_show(struct config_item *item, char *page)
361 {
362 	struct xe_config_device *dev = to_xe_config_device(item);
363 
364 	return sprintf(page, "%d\n", dev->survivability_mode);
365 }
366 
367 static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
368 {
369 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
370 	bool survivability_mode;
371 	int ret;
372 
373 	ret = kstrtobool(page, &survivability_mode);
374 	if (ret)
375 		return ret;
376 
377 	guard(mutex)(&dev->lock);
378 	if (is_bound(dev))
379 		return -EBUSY;
380 
381 	dev->config.survivability_mode = survivability_mode;
382 
383 	return len;
384 }
385 
386 static ssize_t gt_types_allowed_show(struct config_item *item, char *page)
387 {
388 	struct xe_config_device *dev = to_xe_config_device(item);
389 	char *p = page;
390 
391 	for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++)
392 		if (dev->gt_types_allowed & BIT_ULL(gt_types[i].type))
393 			p += sprintf(p, "%s\n", gt_types[i].name);
394 
395 	return p - page;
396 }
397 
398 static ssize_t gt_types_allowed_store(struct config_item *item, const char *page,
399 				      size_t len)
400 {
401 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
402 	char *buf __free(kfree) = kstrdup(page, GFP_KERNEL);
403 	char *p = buf;
404 	u64 typemask = 0;
405 
406 	if (!buf)
407 		return -ENOMEM;
408 
409 	while (p) {
410 		char *typename = strsep(&p, ",\n");
411 		bool matched = false;
412 
413 		if (typename[0] == '\0')
414 			continue;
415 
416 		for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++) {
417 			if (strcmp(typename, gt_types[i].name) == 0) {
418 				typemask |= BIT(gt_types[i].type);
419 				matched = true;
420 				break;
421 			}
422 		}
423 
424 		if (!matched)
425 			return -EINVAL;
426 	}
427 
428 	guard(mutex)(&dev->lock);
429 	if (is_bound(dev))
430 		return -EBUSY;
431 
432 	dev->config.gt_types_allowed = typemask;
433 
434 	return len;
435 }
436 
437 static ssize_t engines_allowed_show(struct config_item *item, char *page)
438 {
439 	struct xe_config_device *dev = to_xe_config_device(item);
440 	char *p = page;
441 
442 	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
443 		u64 mask = engine_info[i].mask;
444 
445 		if ((dev->engines_allowed & mask) == mask) {
446 			p += sprintf(p, "%s*\n", engine_info[i].cls);
447 		} else if (mask & dev->engines_allowed) {
448 			u16 bit0 = __ffs64(mask), bit;
449 
450 			mask &= dev->engines_allowed;
451 
452 			for_each_set_bit(bit, (const unsigned long *)&mask, 64)
453 				p += sprintf(p, "%s%u\n", engine_info[i].cls,
454 					     bit - bit0);
455 		}
456 	}
457 
458 	return p - page;
459 }
460 
461 /*
462  * Lookup engine_info. If @mask is not NULL, reduce the mask according to the
463  * instance in @pattern.
464  *
465  * Examples of inputs:
466  * - lookup_engine_info("rcs0", &mask): return "rcs" entry from @engine_info and
467  *   mask == BIT_ULL(XE_HW_ENGINE_RCS0)
468  * - lookup_engine_info("rcs*", &mask): return "rcs" entry from @engine_info and
469  *   mask == XE_HW_ENGINE_RCS_MASK
470  * - lookup_engine_info("rcs", NULL): return "rcs" entry from @engine_info
471  */
472 static const struct engine_info *lookup_engine_info(const char *pattern, u64 *mask)
473 {
474 	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
475 		u8 instance;
476 		u16 bit;
477 
478 		if (!str_has_prefix(pattern, engine_info[i].cls))
479 			continue;
480 
481 		pattern += strlen(engine_info[i].cls);
482 		if (!mask)
483 			return *pattern ? NULL : &engine_info[i];
484 
485 		if (!strcmp(pattern, "*")) {
486 			*mask = engine_info[i].mask;
487 			return &engine_info[i];
488 		}
489 
490 		if (kstrtou8(pattern, 10, &instance))
491 			return NULL;
492 
493 		bit = __ffs64(engine_info[i].mask) + instance;
494 		if (bit >= fls64(engine_info[i].mask))
495 			return NULL;
496 
497 		*mask = BIT_ULL(bit);
498 		return &engine_info[i];
499 	}
500 
501 	return NULL;
502 }
503 
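/*
 * Parse a single engine pattern from @s, terminated by one of @end_chars.
 * Return the number of characters consumed and fill @mask / @pinfo as per
 * lookup_engine_info(), or a negative errno on failure.
 */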
504 static int parse_engine(const char *s, const char *end_chars, u64 *mask,
505 			const struct engine_info **pinfo)
506 {
507 	char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
508 	const struct engine_info *info;
509 	size_t len;
510 
511 	len = strcspn(s, end_chars);
512 	if (len >= sizeof(buf))
513 		return -EINVAL;
514 
515 	memcpy(buf, s, len);
516 	buf[len] = '\0';
517 
518 	info = lookup_engine_info(buf, mask);
519 	if (!info)
520 		return -ENOENT;
521 
522 	if (pinfo)
523 		*pinfo = info;
524 
525 	return len;
526 }
527 
528 static ssize_t engines_allowed_store(struct config_item *item, const char *page,
529 				     size_t len)
530 {
531 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
532 	ssize_t patternlen, p;
533 	u64 mask, val = 0;
534 
535 	for (p = 0; p < len; p += patternlen + 1) {
536 		patternlen = parse_engine(page + p, ",\n", &mask, NULL);
537 		if (patternlen < 0)
538 			return -EINVAL;
539 
540 		val |= mask;
541 	}
542 
543 	guard(mutex)(&dev->lock);
544 	if (is_bound(dev))
545 		return -EBUSY;
546 
547 	dev->config.engines_allowed = val;
548 
549 	return len;
550 }
551 
552 static ssize_t enable_psmi_show(struct config_item *item, char *page)
553 {
554 	struct xe_config_device *dev = to_xe_config_device(item);
555 
556 	return sprintf(page, "%d\n", dev->enable_psmi);
557 }
558 
559 static ssize_t enable_psmi_store(struct config_item *item, const char *page, size_t len)
560 {
561 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
562 	bool val;
563 	int ret;
564 
565 	ret = kstrtobool(page, &val);
566 	if (ret)
567 		return ret;
568 
569 	guard(mutex)(&dev->lock);
570 	if (is_bound(dev))
571 		return -EBUSY;
572 
573 	dev->config.enable_psmi = val;
574 
575 	return len;
576 }
577 
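/*
 * Helper for wa_bb_show(): advance the output pointer @p by @len. When
 * @dereference is true (i.e. a real output buffer is used), also check the
 * remaining space in @max_size and copy @append, if any; otherwise only count
 * the bytes that would be written.
 */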
578 static bool wa_bb_read_advance(bool dereference, char **p,
579 			       const char *append, size_t len,
580 			       size_t *max_size)
581 {
582 	if (dereference) {
583 		if (len >= *max_size)
584 			return false;
585 		*max_size -= len;
586 		if (append)
587 			memcpy(*p, append, len);
588 	}
589 
590 	*p += len;
591 
592 	return true;
593 }
594 
595 static ssize_t wa_bb_show(struct xe_config_group_device *dev,
596 			  struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
597 			  char *data, size_t sz)
598 {
599 	char *p = data;
600 
601 	guard(mutex)(&dev->lock);
602 
603 	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
604 		enum xe_engine_class ec = engine_info[i].engine_class;
605 		size_t len;
606 
607 		if (!wa_bb[ec].len)
608 			continue;
609 
610 		len = snprintf(p, sz, "%s:", engine_info[i].cls);
611 		if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
612 			return -ENOBUFS;
613 
614 		for (size_t j = 0; j < wa_bb[ec].len; j++) {
615 			len = snprintf(p, sz, " %08x", wa_bb[ec].cs[j]);
616 			if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
617 				return -ENOBUFS;
618 		}
619 
620 		if (!wa_bb_read_advance(data, &p, "\n", 1, &sz))
621 			return -ENOBUFS;
622 	}
623 
624 	if (!wa_bb_read_advance(data, &p, "", 1, &sz))
625 		return -ENOBUFS;
626 
627 	/* Reserve one more to match check for '\0' */
628 	if (!data)
629 		p++;
630 
631 	return p - data;
632 }
633 
634 static ssize_t ctx_restore_mid_bb_show(struct config_item *item, char *page)
635 {
636 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
637 
638 	return wa_bb_show(dev, dev->config.ctx_restore_mid_bb, page, SZ_4K);
639 }
640 
641 static ssize_t ctx_restore_post_bb_show(struct config_item *item, char *page)
642 {
643 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
644 
645 	return wa_bb_show(dev, dev->config.ctx_restore_post_bb, page, SZ_4K);
646 }
647 
648 static void wa_bb_append(struct wa_bb *wa_bb, u32 val)
649 {
650 	if (wa_bb->cs)
651 		wa_bb->cs[wa_bb->len] = val;
652 
653 	wa_bb->len++;
654 }
655 
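/*
 * Parse the next whitespace-separated hex token from @line into @pval.
 * Return the number of characters consumed, 0 at end of line, or -EINVAL on
 * malformed input.
 */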
656 static ssize_t parse_hex(const char *line, u32 *pval)
657 {
658 	char numstr[12];
659 	const char *p;
660 	ssize_t numlen;
661 
662 	p = line + strspn(line, " \t");
663 	if (!*p || *p == '\n')
664 		return 0;
665 
666 	numlen = strcspn(p, " \t\n");
667 	if (!numlen || numlen >= sizeof(numstr) - 1)
668 		return -EINVAL;
669 
670 	memcpy(numstr, p, numlen);
671 	numstr[numlen] = '\0';
672 	p += numlen;
673 
674 	if (kstrtou32(numstr, 16, pval))
675 		return -EINVAL;
676 
677 	return p - line;
678 }
679 
680 /*
681  * Parse lines with the format
682  *
683  *	<engine-class> cmd <u32> <u32...>
684  *	<engine-class> reg <u32_addr> <u32_val>
685  *
686  * and optionally save them in @wa_bb[i].cs is non-NULL.
687  *
688  * Return the number of dwords parsed.
689  */
690 static ssize_t parse_wa_bb_lines(const char *lines,
691 				 struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX])
692 {
693 	ssize_t dwords = 0, ret;
694 	const char *p;
695 
696 	for (p = lines; *p; p++) {
697 		const struct engine_info *info = NULL;
698 		u32 val, val2;
699 
700 		/* Also allow empty lines */
701 		p += strspn(p, " \t\n");
702 		if (!*p)
703 			break;
704 
705 		ret = parse_engine(p, " \t\n", NULL, &info);
706 		if (ret < 0)
707 			return ret;
708 
709 		p += ret;
710 		p += strspn(p, " \t");
711 
712 		if (str_has_prefix(p, "cmd")) {
713 			for (p += strlen("cmd"); *p;) {
714 				ret = parse_hex(p, &val);
715 				if (ret < 0)
716 					return -EINVAL;
717 				if (!ret)
718 					break;
719 
720 				p += ret;
721 				dwords++;
722 				wa_bb_append(&wa_bb[info->engine_class], val);
723 			}
724 		} else if (str_has_prefix(p, "reg")) {
725 			p += strlen("reg");
726 			ret = parse_hex(p, &val);
727 			if (ret <= 0)
728 				return -EINVAL;
729 
730 			p += ret;
731 			ret = parse_hex(p, &val2);
732 			if (ret <= 0)
733 				return -EINVAL;
734 
735 			p += ret;
736 			dwords += 3;
737 			wa_bb_append(&wa_bb[info->engine_class],
738 				     MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1));
739 			wa_bb_append(&wa_bb[info->engine_class], val);
740 			wa_bb_append(&wa_bb[info->engine_class], val2);
741 		} else {
742 			return -EINVAL;
743 		}
744 	}
745 
746 	return dwords;
747 }
748 
749 static ssize_t wa_bb_store(struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
750 			   struct xe_config_group_device *dev,
751 			   const char *page, size_t len)
752 {
753 	/* tmp_wa_bb must match wa_bb's size */
754 	struct wa_bb tmp_wa_bb[XE_ENGINE_CLASS_MAX] = { };
755 	ssize_t count, class;
756 	u32 *tmp;
757 
758 	/* 1. Count dwords - wa_bb[i].cs is NULL for all classes */
759 	count = parse_wa_bb_lines(page, tmp_wa_bb);
760 	if (count < 0)
761 		return count;
762 
763 	guard(mutex)(&dev->lock);
764 
765 	if (is_bound(dev))
766 		return -EBUSY;
767 
768 	/*
769 	 * 2. Allocate a u32 array and set the pointers to the right positions
770 	 * according to the length of each class' wa_bb
771 	 */
772 	tmp = krealloc(wa_bb[0].cs, count * sizeof(u32), GFP_KERNEL);
773 	if (!tmp)
774 		return -ENOMEM;
775 
776 	if (!count) {
777 		memset(wa_bb, 0, sizeof(tmp_wa_bb));
778 		return len;
779 	}
780 
781 	for (class = 0, count = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
782 		tmp_wa_bb[class].cs = tmp + count;
783 		count += tmp_wa_bb[class].len;
784 		tmp_wa_bb[class].len = 0;
785 	}
786 
787 	/* 3. Parse wa_bb lines again, this time saving the values */
788 	count = parse_wa_bb_lines(page, tmp_wa_bb);
789 	if (count < 0)
790 		return count;
791 
792 	memcpy(wa_bb, tmp_wa_bb, sizeof(tmp_wa_bb));
793 
794 	return len;
795 }
796 
797 static ssize_t ctx_restore_mid_bb_store(struct config_item *item,
798 					const char *data, size_t sz)
799 {
800 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
801 
802 	return wa_bb_store(dev->config.ctx_restore_mid_bb, dev, data, sz);
803 }
804 
805 static ssize_t ctx_restore_post_bb_store(struct config_item *item,
806 					 const char *data, size_t sz)
807 {
808 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
809 
810 	return wa_bb_store(dev->config.ctx_restore_post_bb, dev, data, sz);
811 }
812 
813 CONFIGFS_ATTR(, ctx_restore_mid_bb);
814 CONFIGFS_ATTR(, ctx_restore_post_bb);
815 CONFIGFS_ATTR(, enable_psmi);
816 CONFIGFS_ATTR(, engines_allowed);
817 CONFIGFS_ATTR(, gt_types_allowed);
818 CONFIGFS_ATTR(, survivability_mode);
819 
820 static struct configfs_attribute *xe_config_device_attrs[] = {
821 	&attr_ctx_restore_mid_bb,
822 	&attr_ctx_restore_post_bb,
823 	&attr_enable_psmi,
824 	&attr_engines_allowed,
825 	&attr_gt_types_allowed,
826 	&attr_survivability_mode,
827 	NULL,
828 };
829 
830 static void xe_config_device_release(struct config_item *item)
831 {
832 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
833 
834 	mutex_destroy(&dev->lock);
835 
836 	kfree(dev->config.ctx_restore_mid_bb[0].cs);
837 	kfree(dev->config.ctx_restore_post_bb[0].cs);
838 	kfree(dev);
839 }
840 
841 static struct configfs_item_operations xe_config_device_ops = {
842 	.release	= xe_config_device_release,
843 };
844 
845 static bool xe_config_device_is_visible(struct config_item *item,
846 					struct configfs_attribute *attr, int n)
847 {
848 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
849 
850 	if (attr == &attr_survivability_mode) {
851 		if (!dev->desc->is_dgfx || dev->desc->platform < XE_BATTLEMAGE)
852 			return false;
853 	}
854 
855 	return true;
856 }
857 
858 static struct configfs_group_operations xe_config_device_group_ops = {
859 	.is_visible	= xe_config_device_is_visible,
860 };
861 
862 static const struct config_item_type xe_config_device_type = {
863 	.ct_item_ops	= &xe_config_device_ops,
864 	.ct_group_ops	= &xe_config_device_group_ops,
865 	.ct_attrs	= xe_config_device_attrs,
866 	.ct_owner	= THIS_MODULE,
867 };
868 
869 static ssize_t sriov_max_vfs_show(struct config_item *item, char *page)
870 {
871 	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
872 
873 	guard(mutex)(&dev->lock);
874 
875 	if (dev->config.sriov.max_vfs == UINT_MAX)
876 		return sprintf(page, "%s\n", "unlimited");
877 	else
878 		return sprintf(page, "%u\n", dev->config.sriov.max_vfs);
879 }
880 
881 static ssize_t sriov_max_vfs_store(struct config_item *item, const char *page, size_t len)
882 {
883 	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
884 	unsigned int max_vfs;
885 	int ret;
886 
887 	guard(mutex)(&dev->lock);
888 
889 	if (is_bound(dev))
890 		return -EBUSY;
891 
892 	ret = kstrtouint(page, 0, &max_vfs);
893 	if (ret) {
894 		if (!sysfs_streq(page, "unlimited"))
895 			return ret;
896 		max_vfs = UINT_MAX;
897 	}
898 
899 	dev->config.sriov.max_vfs = max_vfs;
900 	return len;
901 }
902 
903 static ssize_t sriov_admin_only_pf_show(struct config_item *item, char *page)
904 {
905 	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
906 
907 	guard(mutex)(&dev->lock);
908 
909 	return sprintf(page, "%s\n", str_yes_no(dev->config.sriov.admin_only_pf));
910 }
911 
912 static ssize_t sriov_admin_only_pf_store(struct config_item *item, const char *page, size_t len)
913 {
914 	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
915 	bool admin_only_pf;
916 	int ret;
917 
918 	guard(mutex)(&dev->lock);
919 
920 	if (is_bound(dev))
921 		return -EBUSY;
922 
923 	ret = kstrtobool(page, &admin_only_pf);
924 	if (ret)
925 		return ret;
926 
927 	dev->config.sriov.admin_only_pf = admin_only_pf;
928 	return len;
929 }
930 
931 CONFIGFS_ATTR(sriov_, max_vfs);
932 CONFIGFS_ATTR(sriov_, admin_only_pf);
933 
934 static struct configfs_attribute *xe_config_sriov_attrs[] = {
935 	&sriov_attr_max_vfs,
936 	&sriov_attr_admin_only_pf,
937 	NULL,
938 };
939 
940 static bool xe_config_sriov_is_visible(struct config_item *item,
941 				       struct configfs_attribute *attr, int n)
942 {
943 	struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
944 
945 	if (attr == &sriov_attr_max_vfs && dev->mode != XE_SRIOV_MODE_PF)
946 		return false;
947 	if (attr == &sriov_attr_admin_only_pf && dev->mode != XE_SRIOV_MODE_PF)
948 		return false;
949 
950 	return true;
951 }
952 
953 static struct configfs_group_operations xe_config_sriov_group_ops = {
954 	.is_visible	= xe_config_sriov_is_visible,
955 };
956 
957 static const struct config_item_type xe_config_sriov_type = {
958 	.ct_owner	= THIS_MODULE,
959 	.ct_group_ops	= &xe_config_sriov_group_ops,
960 	.ct_attrs	= xe_config_sriov_attrs,
961 };
962 
963 static const struct xe_device_desc *xe_match_desc(struct pci_dev *pdev)
964 {
965 	struct device_driver *driver = driver_find("xe", &pci_bus_type);
966 	struct pci_driver *drv = to_pci_driver(driver);
967 	const struct pci_device_id *ids = drv ? drv->id_table : NULL;
968 	const struct pci_device_id *found = pci_match_id(ids, pdev);
969 
970 	return found ? (const void *)found->driver_data : NULL;
971 }
972 
973 static struct pci_dev *get_physfn_instead(struct pci_dev *virtfn)
974 {
975 	struct pci_dev *physfn = pci_physfn(virtfn);
976 
977 	pci_dev_get(physfn);
978 	pci_dev_put(virtfn);
979 	return physfn;
980 }
981 
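/*
 * Create the config group for a device, named by its canonical PCI address.
 * If the named function does not exist (yet), it is treated as a prospective
 * VF of the physical function and the VF number is validated against the PF.
 */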
982 static struct config_group *xe_config_make_device_group(struct config_group *group,
983 							const char *name)
984 {
985 	unsigned int domain, bus, slot, function;
986 	struct xe_config_group_device *dev;
987 	const struct xe_device_desc *match;
988 	enum xe_sriov_mode mode;
989 	struct pci_dev *pdev;
990 	char canonical[16];
991 	int vfnumber = 0;
992 	int ret;
993 
994 	ret = sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function);
995 	if (ret != 4)
996 		return ERR_PTR(-EINVAL);
997 
998 	ret = scnprintf(canonical, sizeof(canonical), "%04x:%02x:%02x.%d", domain, bus,
999 			PCI_SLOT(PCI_DEVFN(slot, function)),
1000 			PCI_FUNC(PCI_DEVFN(slot, function)));
1001 	if (ret != 12 || strcmp(name, canonical))
1002 		return ERR_PTR(-EINVAL);
1003 
1004 	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
1005 	mode = pdev ? dev_is_pf(&pdev->dev) ?
1006 		XE_SRIOV_MODE_PF : XE_SRIOV_MODE_NONE : XE_SRIOV_MODE_VF;
1007 
1008 	if (!pdev && function)
1009 		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, 0));
1010 	if (!pdev && slot)
1011 		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(0, 0));
1012 	if (!pdev)
1013 		return ERR_PTR(-ENODEV);
1014 
1015 	if (PCI_DEVFN(slot, function) != pdev->devfn) {
1016 		pdev = get_physfn_instead(pdev);
1017 		vfnumber = PCI_DEVFN(slot, function) - pdev->devfn;
1018 		if (!dev_is_pf(&pdev->dev) || vfnumber > pci_sriov_get_totalvfs(pdev)) {
1019 			pci_dev_put(pdev);
1020 			return ERR_PTR(-ENODEV);
1021 		}
1022 	}
1023 
1024 	match = xe_match_desc(pdev);
1025 	if (match && vfnumber && !match->has_sriov) {
1026 		pci_info(pdev, "xe driver does not support VFs on this device\n");
1027 		match = NULL;
1028 	} else if (!match) {
1029 		pci_info(pdev, "xe driver does not support configuration of this device\n");
1030 	}
1031 
1032 	pci_dev_put(pdev);
1033 
1034 	if (!match)
1035 		return ERR_PTR(-ENOENT);
1036 
1037 	dev = kzalloc_obj(*dev);
1038 	if (!dev)
1039 		return ERR_PTR(-ENOMEM);
1040 
1041 	dev->desc = match;
1042 	dev->mode = match->has_sriov ? mode : XE_SRIOV_MODE_NONE;
1043 
1044 	set_device_defaults(&dev->config);
1045 
1046 	config_group_init_type_name(&dev->group, name, &xe_config_device_type);
1047 	if (dev->mode != XE_SRIOV_MODE_NONE) {
1048 		config_group_init_type_name(&dev->sriov, "sriov", &xe_config_sriov_type);
1049 		configfs_add_default_group(&dev->sriov, &dev->group);
1050 	}
1051 
1052 	mutex_init(&dev->lock);
1053 
1054 	return &dev->group;
1055 }
1056 
1057 static struct configfs_group_operations xe_config_group_ops = {
1058 	.make_group	= xe_config_make_device_group,
1059 };
1060 
1061 static const struct config_item_type xe_configfs_type = {
1062 	.ct_group_ops	= &xe_config_group_ops,
1063 	.ct_owner	= THIS_MODULE,
1064 };
1065 
1066 static struct configfs_subsystem xe_configfs = {
1067 	.su_group = {
1068 		.cg_item = {
1069 			.ci_namebuf = "xe",
1070 			.ci_type = &xe_configfs_type,
1071 		},
1072 	},
1073 };
1074 
1075 static struct xe_config_group_device *find_xe_config_group_device(struct pci_dev *pdev)
1076 {
1077 	struct config_item *item;
1078 
1079 	mutex_lock(&xe_configfs.su_mutex);
1080 	item = config_group_find_item(&xe_configfs.su_group, pci_name(pdev));
1081 	mutex_unlock(&xe_configfs.su_mutex);
1082 
1083 	if (!item)
1084 		return NULL;
1085 
1086 	return to_xe_config_group_device(item);
1087 }
1088 
1089 static void dump_custom_dev_config(struct pci_dev *pdev,
1090 				   struct xe_config_group_device *dev)
1091 {
1092 #define PRI_CUSTOM_ATTR(fmt_, attr_) do { \
1093 		if (dev->config.attr_ != device_defaults.attr_) \
1094 			pci_info(pdev, "configfs: " __stringify(attr_) " = " fmt_ "\n", \
1095 				 dev->config.attr_); \
1096 	} while (0)
1097 
1098 	PRI_CUSTOM_ATTR("%llx", gt_types_allowed);
1099 	PRI_CUSTOM_ATTR("%llx", engines_allowed);
1100 	PRI_CUSTOM_ATTR("%d", enable_psmi);
1101 	PRI_CUSTOM_ATTR("%d", survivability_mode);
1102 	PRI_CUSTOM_ATTR("%u", sriov.admin_only_pf);
1103 
1104 #undef PRI_CUSTOM_ATTR
1105 }
1106 
1107 /**
1108  * xe_configfs_check_device() - Test if device was configured by configfs
1109  * @pdev: the &pci_dev device to test
1110  *
1111  * Try to find the configfs group that belongs to the specified pci device
1112  * and print a diagnostic message if the configuration differs from the defaults.
1113  */
1114 void xe_configfs_check_device(struct pci_dev *pdev)
1115 {
1116 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
1117 
1118 	if (!dev)
1119 		return;
1120 
1121 	/* memcmp here is safe as both are zero-initialized */
1122 	if (memcmp(&dev->config, &device_defaults, sizeof(dev->config))) {
1123 		pci_info(pdev, "Found custom settings in configfs\n");
1124 		dump_custom_dev_config(pdev, dev);
1125 	}
1126 
1127 	config_group_put(&dev->group);
1128 }
1129 
1130 /**
1131  * xe_configfs_get_survivability_mode - get configfs survivability mode attribute
1132  * @pdev: pci device
1133  *
1134  * Return: survivability_mode attribute in configfs
1135  */
1136 bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
1137 {
1138 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
1139 	bool mode;
1140 
1141 	if (!dev)
1142 		return device_defaults.survivability_mode;
1143 
1144 	mode = dev->config.survivability_mode;
1145 	config_group_put(&dev->group);
1146 
1147 	return mode;
1148 }
1149 
1150 static u64 get_gt_types_allowed(struct pci_dev *pdev)
1151 {
1152 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
1153 	u64 mask;
1154 
1155 	if (!dev)
1156 		return device_defaults.gt_types_allowed;
1157 
1158 	mask = dev->config.gt_types_allowed;
1159 	config_group_put(&dev->group);
1160 
1161 	return mask;
1162 }
1163 
1164 /**
1165  * xe_configfs_primary_gt_allowed - determine whether primary GTs are supported
1166  * @pdev: pci device
1167  *
1168  * Return: True if primary GTs are enabled, false if they have been disabled via
1169  *     configfs.
1170  */
1171 bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev)
1172 {
1173 	return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MAIN);
1174 }
1175 
1176 /**
1177  * xe_configfs_media_gt_allowed - determine whether media GTs are supported
1178  * @pdev: pci device
1179  *
1180  * Return: True if the media GTs are enabled, false if they have been disabled
1181  *     via configfs.
1182  */
1183 bool xe_configfs_media_gt_allowed(struct pci_dev *pdev)
1184 {
1185 	return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MEDIA);
1186 }
1187 
1188 /**
1189  * xe_configfs_get_engines_allowed - get engine allowed mask from configfs
1190  * @pdev: pci device
1191  *
1192  * Return: engine mask with allowed engines set in configfs
1193  */
1194 u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
1195 {
1196 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
1197 	u64 engines_allowed;
1198 
1199 	if (!dev)
1200 		return device_defaults.engines_allowed;
1201 
1202 	engines_allowed = dev->config.engines_allowed;
1203 	config_group_put(&dev->group);
1204 
1205 	return engines_allowed;
1206 }
1207 
1208 /**
1209  * xe_configfs_get_psmi_enabled - get configfs enable_psmi setting
1210  * @pdev: pci device
1211  *
1212  * Return: enable_psmi setting in configfs
1213  */
1214 bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev)
1215 {
1216 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
1217 	bool ret;
1218 
1219 	if (!dev)
1220 		return false;
1221 
1222 	ret = dev->config.enable_psmi;
1223 	config_group_put(&dev->group);
1224 
1225 	return ret;
1226 }
1227 
1228 /**
1229  * xe_configfs_get_ctx_restore_mid_bb - get configfs ctx_restore_mid_bb setting
1230  * @pdev: pci device
1231  * @class: hw engine class
1232  * @cs: pointer to the bb to use - only valid during probe
1233  *
1234  * Return: Number of dwords used in the mid_ctx_restore setting in configfs
1235  */
1236 u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev,
1237 				       enum xe_engine_class class,
1238 				       const u32 **cs)
1239 {
1240 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
1241 	u32 len;
1242 
1243 	if (!dev)
1244 		return 0;
1245 
1246 	if (cs)
1247 		*cs = dev->config.ctx_restore_mid_bb[class].cs;
1248 
1249 	len = dev->config.ctx_restore_mid_bb[class].len;
1250 	config_group_put(&dev->group);
1251 
1252 	return len;
1253 }
1254 
1255 /**
1256  * xe_configfs_get_ctx_restore_post_bb - get configfs ctx_restore_post_bb setting
1257  * @pdev: pci device
1258  * @class: hw engine class
1259  * @cs: pointer to the bb to use - only valid during probe
1260  *
1261  * Return: Number of dwords used in the post_ctx_restore setting in configfs
1262  */
1263 u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
1264 					enum xe_engine_class class,
1265 					const u32 **cs)
1266 {
1267 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
1268 	u32 len;
1269 
1270 	if (!dev)
1271 		return 0;
1272 
1273 	*cs = dev->config.ctx_restore_post_bb[class].cs;
1274 	len = dev->config.ctx_restore_post_bb[class].len;
1275 	config_group_put(&dev->group);
1276 
1277 	return len;
1278 }
1279 
1280 #ifdef CONFIG_PCI_IOV
1281 /**
1282  * xe_configfs_admin_only_pf() - Get PF's operational mode.
1283  * @pdev: the &pci_dev device
1284  *
1285  * Find the configfs group that belongs to the PCI device and return a flag
1286  * whether the PF driver should be dedicated for VFs management only.
1287  *
1288  * If configfs group is not present, use driver's default value.
1289  *
1290  * Return: true if PF driver is dedicated for VFs administration only.
1291  */
1292 bool xe_configfs_admin_only_pf(struct pci_dev *pdev)
1293 {
1294 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
1295 	bool admin_only_pf;
1296 
1297 	if (!dev)
1298 		return XE_DEFAULT_ADMIN_ONLY_PF;
1299 
1300 	scoped_guard(mutex, &dev->lock)
1301 		admin_only_pf = dev->config.sriov.admin_only_pf;
1302 
1303 	config_group_put(&dev->group);
1304 
1305 	return admin_only_pf;
1306 }
1307 /**
1308  * xe_configfs_get_max_vfs() - Get number of VFs that could be managed
1309  * @pdev: the &pci_dev device
1310  *
1311  * Find the configfs group that belongs to the PCI device and return the
1312  * maximum number of Virtual Functions (VFs) that could be managed by this
1313  * device. If the configfs group is not present, use the max_vfs module parameter.
1314  *
1315  * Return: maximum number of VFs that could be managed.
1316  */
1317 unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev)
1318 {
1319 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
1320 	unsigned int max_vfs;
1321 
1322 	if (!dev)
1323 		return xe_modparam.max_vfs;
1324 
1325 	scoped_guard(mutex, &dev->lock)
1326 		max_vfs = dev->config.sriov.max_vfs;
1327 
1328 	config_group_put(&dev->group);
1329 
1330 	return max_vfs;
1331 }
1332 #endif
1333 
1334 int __init xe_configfs_init(void)
1335 {
1336 	int ret;
1337 
1338 	config_group_init(&xe_configfs.su_group);
1339 	mutex_init(&xe_configfs.su_mutex);
1340 	ret = configfs_register_subsystem(&xe_configfs);
1341 	if (ret) {
1342 		mutex_destroy(&xe_configfs.su_mutex);
1343 		return ret;
1344 	}
1345 
1346 	return 0;
1347 }
1348 
1349 void xe_configfs_exit(void)
1350 {
1351 	configfs_unregister_subsystem(&xe_configfs);
1352 	mutex_destroy(&xe_configfs.su_mutex);
1353 }
1354