1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2025 Intel Corporation
4  */
5 
6 #include <linux/bitops.h>
7 #include <linux/ctype.h>
8 #include <linux/configfs.h>
9 #include <linux/cleanup.h>
10 #include <linux/find.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/string.h>
15 
16 #include "instructions/xe_mi_commands.h"
17 #include "xe_configfs.h"
18 #include "xe_hw_engine_types.h"
19 #include "xe_module.h"
20 #include "xe_pci_types.h"
21 
22 /**
23  * DOC: Xe Configfs
24  *
25  * Overview
26  * ========
27  *
28  * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
29  * configfs subsystem called ``xe`` that creates a directory in the mounted
30  * configfs directory. The user can create devices under this directory and
31  * configure them as necessary. See Documentation/filesystems/configfs.rst for
32  * more information about how configfs works.
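 *
 * If configfs is not mounted yet, it can be mounted at the standard location
 * used by the examples in this document::
 *
 *	# mount -t configfs none /sys/kernel/config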
33  *
34  * Create devices
35  * ==============
36  *
 37  * To create a device, the ``xe`` module must already be loaded, but some
 38  * attributes can only be set before the device is bound. That can be
 39  * accomplished by blocking the driver autoprobe::
40  *
41  *	# echo 0 > /sys/bus/pci/drivers_autoprobe
42  *	# modprobe xe
43  *
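 *
 * Autoprobe can be turned back on once the attributes are configured and the
 * device is bound::
 *
 *	# echo 1 > /sys/bus/pci/drivers_autoprobe
 *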
 44  * A device is created by making a directory, named by PCI address, inside ``xe``::
45  *
46  *	# mkdir /sys/kernel/config/xe/0000:03:00.0/
47  *
48  * Every device created is populated by the driver with entries that can be
49  * used to configure it::
50  *
51  *	/sys/kernel/config/xe/
52  *	├── 0000:00:02.0
53  *	│   └── ...
54  *	├── 0000:00:02.1
55  *	│   └── ...
56  *	:
57  *	└── 0000:03:00.0
58  *	    ├── survivability_mode
59  *	    ├── engines_allowed
60  *	    └── enable_psmi
61  *
 62  * After configuring the attributes as per the next section, the device can be
 63  * probed with::
64  *
65  *	# echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind
66  *	# # or
67  *	# echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
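 *
 * If bind-time attributes need to be changed again later, the device can
 * first be unbound from the driver::
 *
 *	# echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/unbind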
68  *
69  * Configure Attributes
70  * ====================
71  *
 72  * Survivability mode
 73  * ------------------
74  *
75  * Enable survivability mode on supported cards. This setting only takes
76  * effect when probing the device. Example to enable it::
77  *
78  *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
79  *
80  * This attribute can only be set before binding to the device.
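 *
 * The current value can be read back from the same attribute::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/survivability_mode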
81  *
 82  * Allowed engines
 83  * ---------------
84  *
 85  * Allow only a set of engines to be available, disabling the others even if
 86  * they are present in hardware. This is applied after HW fuses are considered
 87  * on each tile. Examples:
88  *
 89  * Allow only one render and one copy engine, nothing else::
90  *
91  *	# echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
92  *
 93  * Allow only compute engines and the first copy engine::
94  *
95  *	# echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
96  *
97  * Note that the engine names are the per-GT hardware names. On multi-tile
98  * platforms, writing ``rcs0,bcs0`` to this file would allow the first render
99  * and copy engines on each tile.
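 *
 * All engine classes known to the driver can be allowed again by listing each
 * of them with a wildcard (effectively the default, which allows everything)::
 *
 *	# echo 'rcs*,bcs*,vcs*,vecs*,ccs*,gsccs*' \
 *		> /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *
 * Reading the attribute back prints the allowed engines, one pattern per line,
 * using the ``<class>*`` form when a whole class is allowed.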
100  *
 101  * The requested configuration may not be supported by the platform, in which
 102  * case the driver may fail to probe: for example, if at least one copy engine
 103  * is expected to be available for migrations but all of them are disabled.
 104  * This is intended for debugging purposes only.
105  *
106  * This attribute can only be set before binding to the device.
107  *
108  * PSMI
109  * ----
110  *
 111  * Enable extra debugging capabilities to trace engine execution. This is only
 112  * useful during early platform enabling and requires additional hardware to be
 113  * connected. Once enabled, additional WAs are added and runtime configuration
 114  * is done via debugfs. Example to enable it::
115  *
116  *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/enable_psmi
117  *
118  * This attribute can only be set before binding to the device.
119  *
120  * Context restore BB
121  * ------------------
122  *
 123  * Allow executing a batch buffer during any context switch: when the GPU is
 124  * restoring the context, it executes these additional commands. This is useful
 125  * for testing additional workarounds and validating certain HW behaviors; it's
 126  * not intended for normal execution and will taint the kernel with TAINT_TEST
 127  * when used.
128  *
 129  * The syntax allows passing raw instructions to be executed by the engine in a
 130  * batch buffer, or setting specific registers.
131  *
132  * #. Generic instruction::
133  *
134  *	<engine-class> cmd <instr> [[dword0] [dword1] [...]]
135  *
136  * #. Simple register setting::
137  *
138  *	<engine-class> reg <address> <value>
139  *
 140  * Commands are saved per engine class: all instances of that class will execute
 141  * those commands during a context switch. The instruction, dword arguments,
 142  * addresses and values are given in hex, as in the examples below.
143  *
 144  * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f100 after the
 145  *    normal context restore::
146  *
147  *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
148  *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
149  *
 150  * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f100 at the
 151  *    beginning of the context restore::
152  *
153  *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
154  *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_mid_bb
 155  *
 156  * #. Load certain values in a couple of registers (this can be used as a
 157  *    simpler alternative to the ``cmd`` action)::
158  *
159  *	# cat > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb <<EOF
160  *	rcs reg 4F100 DEADBEEF
161  *	rcs reg 4F104 FFFFFFFF
162  *	EOF
163  *
164  *    .. note::
165  *
166  *       When using multiple lines, make sure to use a command that is
167  *       implemented with a single write syscall, like HEREDOC.
168  *
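 * For instance, ``printf`` usually emits its whole output in a single write
 * and can be used as an alternative to the HEREDOC above::
 *
 *	# printf 'rcs reg 4F100 DEADBEEF\nrcs reg 4F104 FFFFFFFF\n' \
 *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
 *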
169  * Currently this is implemented only for post and mid context restore and
170  * these attributes can only be set before binding to the device.
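 *
 * Reading the attribute back prints one line per configured engine class, with
 * the dwords in hex. For the ``reg`` example above, the driver stores an
 * ``MI_LOAD_REGISTER_IMM`` header (``11000001`` for a single register) followed
 * by the address and value, so the output should look roughly like::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
 *	rcs: 11000001 0004f100 deadbeef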
171  *
172  * Remove devices
173  * ==============
174  *
175  * The created device directories can be removed using ``rmdir``::
176  *
177  *	# rmdir /sys/kernel/config/xe/0000:03:00.0/
178  */
179 
180 /* Similar to struct xe_bb, but not tied to HW (yet) */
181 struct wa_bb {
182 	u32 *cs;
183 	u32 len; /* in dwords */
184 };
185 
186 struct xe_config_group_device {
187 	struct config_group group;
188 
189 	struct xe_config_device {
190 		u64 engines_allowed;
191 		struct wa_bb ctx_restore_post_bb[XE_ENGINE_CLASS_MAX];
192 		struct wa_bb ctx_restore_mid_bb[XE_ENGINE_CLASS_MAX];
193 		bool survivability_mode;
194 		bool enable_psmi;
195 	} config;
196 
197 	/* protects attributes */
198 	struct mutex lock;
199 	/* matching descriptor */
200 	const struct xe_device_desc *desc;
201 };
202 
203 static const struct xe_config_device device_defaults = {
204 	.engines_allowed = U64_MAX,
205 	.survivability_mode = false,
206 	.enable_psmi = false,
207 };
208 
209 static void set_device_defaults(struct xe_config_device *config)
210 {
211 	*config = device_defaults;
212 }
213 
214 struct engine_info {
215 	const char *cls;
216 	u64 mask;
217 	enum xe_engine_class engine_class;
218 };
219 
220 /* Helpful macros to aid in sizing buffer allocations when parsing */
221 #define MAX_ENGINE_CLASS_CHARS 5
222 #define MAX_ENGINE_INSTANCE_CHARS 2
223 
224 static const struct engine_info engine_info[] = {
225 	{ .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK, .engine_class = XE_ENGINE_CLASS_RENDER },
226 	{ .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK, .engine_class = XE_ENGINE_CLASS_COPY },
227 	{ .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_DECODE },
228 	{ .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_ENHANCE },
229 	{ .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK, .engine_class = XE_ENGINE_CLASS_COMPUTE },
230 	{ .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK, .engine_class = XE_ENGINE_CLASS_OTHER },
231 };
232 
233 static struct xe_config_group_device *to_xe_config_group_device(struct config_item *item)
234 {
235 	return container_of(to_config_group(item), struct xe_config_group_device, group);
236 }
237 
238 static struct xe_config_device *to_xe_config_device(struct config_item *item)
239 {
240 	return &to_xe_config_group_device(item)->config;
241 }
242 
243 static bool is_bound(struct xe_config_group_device *dev)
244 {
245 	unsigned int domain, bus, slot, function;
246 	struct pci_dev *pdev;
247 	const char *name;
248 	bool ret;
249 
250 	lockdep_assert_held(&dev->lock);
251 
252 	name = dev->group.cg_item.ci_name;
253 	if (sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function) != 4)
254 		return false;
255 
256 	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
257 	if (!pdev)
258 		return false;
259 
260 	ret = pci_get_drvdata(pdev);
261 	if (ret)
262 		pci_dbg(pdev, "Already bound to driver\n");
263 
264 	pci_dev_put(pdev);
265 
266 	return ret;
267 }
268 
269 static ssize_t survivability_mode_show(struct config_item *item, char *page)
270 {
271 	struct xe_config_device *dev = to_xe_config_device(item);
272 
273 	return sprintf(page, "%d\n", dev->survivability_mode);
274 }
275 
276 static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
277 {
278 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
279 	bool survivability_mode;
280 	int ret;
281 
282 	ret = kstrtobool(page, &survivability_mode);
283 	if (ret)
284 		return ret;
285 
286 	guard(mutex)(&dev->lock);
287 	if (is_bound(dev))
288 		return -EBUSY;
289 
290 	dev->config.survivability_mode = survivability_mode;
291 
292 	return len;
293 }
294 
295 static ssize_t engines_allowed_show(struct config_item *item, char *page)
296 {
297 	struct xe_config_device *dev = to_xe_config_device(item);
298 	char *p = page;
299 
300 	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
301 		u64 mask = engine_info[i].mask;
302 
303 		if ((dev->engines_allowed & mask) == mask) {
304 			p += sprintf(p, "%s*\n", engine_info[i].cls);
305 		} else if (mask & dev->engines_allowed) {
306 			u16 bit0 = __ffs64(mask), bit;
307 
308 			mask &= dev->engines_allowed;
309 
310 			for_each_set_bit(bit, (const unsigned long *)&mask, 64)
311 				p += sprintf(p, "%s%u\n", engine_info[i].cls,
312 					     bit - bit0);
313 		}
314 	}
315 
316 	return p - page;
317 }
318 
319 /*
320  * Lookup engine_info. If @mask is not NULL, reduce the mask according to the
321  * instance in @pattern.
322  *
323  * Examples of inputs:
324  * - lookup_engine_info("rcs0", &mask): return "rcs" entry from @engine_info and
325  *   mask == BIT_ULL(XE_HW_ENGINE_RCS0)
326  * - lookup_engine_info("rcs*", &mask): return "rcs" entry from @engine_info and
327  *   mask == XE_HW_ENGINE_RCS_MASK
328  * - lookup_engine_info("rcs", NULL): return "rcs" entry from @engine_info
329  */
330 static const struct engine_info *lookup_engine_info(const char *pattern, u64 *mask)
331 {
332 	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
333 		u8 instance;
334 		u16 bit;
335 
336 		if (!str_has_prefix(pattern, engine_info[i].cls))
337 			continue;
338 
339 		pattern += strlen(engine_info[i].cls);
340 		if (!mask)
341 			return *pattern ? NULL : &engine_info[i];
342 
343 		if (!strcmp(pattern, "*")) {
344 			*mask = engine_info[i].mask;
345 			return &engine_info[i];
346 		}
347 
348 		if (kstrtou8(pattern, 10, &instance))
349 			return NULL;
350 
351 		bit = __ffs64(engine_info[i].mask) + instance;
352 		if (bit >= fls64(engine_info[i].mask))
353 			return NULL;
354 
355 		*mask = BIT_ULL(bit);
356 		return &engine_info[i];
357 	}
358 
359 	return NULL;
360 }
361 
362 static int parse_engine(const char *s, const char *end_chars, u64 *mask,
363 			const struct engine_info **pinfo)
364 {
365 	char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
366 	const struct engine_info *info;
367 	size_t len;
368 
369 	len = strcspn(s, end_chars);
370 	if (len >= sizeof(buf))
371 		return -EINVAL;
372 
373 	memcpy(buf, s, len);
374 	buf[len] = '\0';
375 
376 	info = lookup_engine_info(buf, mask);
377 	if (!info)
378 		return -ENOENT;
379 
380 	if (pinfo)
381 		*pinfo = info;
382 
383 	return len;
384 }
385 
386 static ssize_t engines_allowed_store(struct config_item *item, const char *page,
387 				     size_t len)
388 {
389 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
390 	ssize_t patternlen, p;
391 	u64 mask, val = 0;
392 
393 	for (p = 0; p < len; p += patternlen + 1) {
394 		patternlen = parse_engine(page + p, ",\n", &mask, NULL);
395 		if (patternlen < 0)
396 			return -EINVAL;
397 
398 		val |= mask;
399 	}
400 
401 	guard(mutex)(&dev->lock);
402 	if (is_bound(dev))
403 		return -EBUSY;
404 
405 	dev->config.engines_allowed = val;
406 
407 	return len;
408 }
409 
410 static ssize_t enable_psmi_show(struct config_item *item, char *page)
411 {
412 	struct xe_config_device *dev = to_xe_config_device(item);
413 
414 	return sprintf(page, "%d\n", dev->enable_psmi);
415 }
416 
417 static ssize_t enable_psmi_store(struct config_item *item, const char *page, size_t len)
418 {
419 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
420 	bool val;
421 	int ret;
422 
423 	ret = kstrtobool(page, &val);
424 	if (ret)
425 		return ret;
426 
427 	guard(mutex)(&dev->lock);
428 	if (is_bound(dev))
429 		return -EBUSY;
430 
431 	dev->config.enable_psmi = val;
432 
433 	return len;
434 }
435 
436 static bool wa_bb_read_advance(bool dereference, char **p,
437 			       const char *append, size_t len,
438 			       size_t *max_size)
439 {
440 	if (dereference) {
441 		if (len >= *max_size)
442 			return false;
443 		*max_size -= len;
444 		if (append)
445 			memcpy(*p, append, len);
446 	}
447 
448 	*p += len;
449 
450 	return true;
451 }
452 
453 static ssize_t wa_bb_show(struct xe_config_group_device *dev,
454 			  struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
455 			  char *data, size_t sz)
456 {
457 	char *p = data;
458 
459 	guard(mutex)(&dev->lock);
460 
461 	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
462 		enum xe_engine_class ec = engine_info[i].engine_class;
463 		size_t len;
464 
465 		if (!wa_bb[ec].len)
466 			continue;
467 
468 		len = snprintf(p, sz, "%s:", engine_info[i].cls);
469 		if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
470 			return -ENOBUFS;
471 
472 		for (size_t j = 0; j < wa_bb[ec].len; j++) {
473 			len = snprintf(p, sz, " %08x", wa_bb[ec].cs[j]);
474 			if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
475 				return -ENOBUFS;
476 		}
477 
478 		if (!wa_bb_read_advance(data, &p, "\n", 1, &sz))
479 			return -ENOBUFS;
480 	}
481 
482 	if (!wa_bb_read_advance(data, &p, "", 1, &sz))
483 		return -ENOBUFS;
484 
485 	/* Reserve one more to match check for '\0' */
486 	if (!data)
487 		p++;
488 
489 	return p - data;
490 }
491 
492 static ssize_t ctx_restore_mid_bb_show(struct config_item *item, char *page)
493 {
494 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
495 
496 	return wa_bb_show(dev, dev->config.ctx_restore_mid_bb, page, SZ_4K);
497 }
498 
499 static ssize_t ctx_restore_post_bb_show(struct config_item *item, char *page)
500 {
501 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
502 
503 	return wa_bb_show(dev, dev->config.ctx_restore_post_bb, page, SZ_4K);
504 }
505 
506 static void wa_bb_append(struct wa_bb *wa_bb, u32 val)
507 {
508 	if (wa_bb->cs)
509 		wa_bb->cs[wa_bb->len] = val;
510 
511 	wa_bb->len++;
512 }
513 
514 static ssize_t parse_hex(const char *line, u32 *pval)
515 {
516 	char numstr[12];
517 	const char *p;
518 	ssize_t numlen;
519 
520 	p = line + strspn(line, " \t");
521 	if (!*p || *p == '\n')
522 		return 0;
523 
524 	numlen = strcspn(p, " \t\n");
525 	if (!numlen || numlen >= sizeof(numstr) - 1)
526 		return -EINVAL;
527 
528 	memcpy(numstr, p, numlen);
529 	numstr[numlen] = '\0';
530 	p += numlen;
531 
532 	if (kstrtou32(numstr, 16, pval))
533 		return -EINVAL;
534 
535 	return p - line;
536 }
537 
538 /*
539  * Parse lines with the format
540  *
541  *	<engine-class> cmd <u32> <u32...>
542  *	<engine-class> reg <u32_addr> <u32_val>
543  *
 544  * and optionally save them in @wa_bb[i] if @wa_bb[i].cs is non-NULL.
545  *
546  * Return the number of dwords parsed.
547  */
548 static ssize_t parse_wa_bb_lines(const char *lines,
549 				 struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX])
550 {
551 	ssize_t dwords = 0, ret;
552 	const char *p;
553 
554 	for (p = lines; *p; p++) {
555 		const struct engine_info *info = NULL;
556 		u32 val, val2;
557 
558 		/* Also allow empty lines */
559 		p += strspn(p, " \t\n");
560 		if (!*p)
561 			break;
562 
563 		ret = parse_engine(p, " \t\n", NULL, &info);
564 		if (ret < 0)
565 			return ret;
566 
567 		p += ret;
568 		p += strspn(p, " \t");
569 
570 		if (str_has_prefix(p, "cmd")) {
571 			for (p += strlen("cmd"); *p;) {
572 				ret = parse_hex(p, &val);
573 				if (ret < 0)
574 					return -EINVAL;
575 				if (!ret)
576 					break;
577 
578 				p += ret;
579 				dwords++;
580 				wa_bb_append(&wa_bb[info->engine_class], val);
581 			}
582 		} else if (str_has_prefix(p, "reg")) {
583 			p += strlen("reg");
584 			ret = parse_hex(p, &val);
585 			if (ret <= 0)
586 				return -EINVAL;
587 
588 			p += ret;
589 			ret = parse_hex(p, &val2);
590 			if (ret <= 0)
591 				return -EINVAL;
592 
593 			p += ret;
594 			dwords += 3;
595 			wa_bb_append(&wa_bb[info->engine_class],
596 				     MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1));
597 			wa_bb_append(&wa_bb[info->engine_class], val);
598 			wa_bb_append(&wa_bb[info->engine_class], val2);
599 		} else {
600 			return -EINVAL;
601 		}
602 	}
603 
604 	return dwords;
605 }
606 
607 static ssize_t wa_bb_store(struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
608 			   struct xe_config_group_device *dev,
609 			   const char *page, size_t len)
610 {
611 	/* tmp_wa_bb must match wa_bb's size */
612 	struct wa_bb tmp_wa_bb[XE_ENGINE_CLASS_MAX] = { };
613 	ssize_t count, class;
614 	u32 *tmp;
615 
616 	/* 1. Count dwords - tmp_wa_bb[i].cs is NULL for all classes */
617 	count = parse_wa_bb_lines(page, tmp_wa_bb);
618 	if (count < 0)
619 		return count;
620 
621 	guard(mutex)(&dev->lock);
622 
623 	if (is_bound(dev))
624 		return -EBUSY;
625 
626 	/*
627 	 * 2. Allocate a u32 array and set the pointers to the right positions
628 	 * according to the length of each class' wa_bb
629 	 */
630 	tmp = krealloc(wa_bb[0].cs, count * sizeof(u32), GFP_KERNEL);
631 	if (!tmp)
632 		return -ENOMEM;
633 
634 	if (!count) {
635 		memset(wa_bb, 0, sizeof(tmp_wa_bb));
636 		return len;
637 	}
638 
639 	for (class = 0, count = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
640 		tmp_wa_bb[class].cs = tmp + count;
641 		count += tmp_wa_bb[class].len;
642 		tmp_wa_bb[class].len = 0;
643 	}
644 
645 	/* 3. Parse wa_bb lines again, this time saving the values */
646 	count = parse_wa_bb_lines(page, tmp_wa_bb);
647 	if (count < 0)
648 		return count;
649 
650 	memcpy(wa_bb, tmp_wa_bb, sizeof(tmp_wa_bb));
651 
652 	return len;
653 }
654 
655 static ssize_t ctx_restore_mid_bb_store(struct config_item *item,
656 					const char *data, size_t sz)
657 {
658 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
659 
660 	return wa_bb_store(dev->config.ctx_restore_mid_bb, dev, data, sz);
661 }
662 
663 static ssize_t ctx_restore_post_bb_store(struct config_item *item,
664 					 const char *data, size_t sz)
665 {
666 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
667 
668 	return wa_bb_store(dev->config.ctx_restore_post_bb, dev, data, sz);
669 }
670 
671 CONFIGFS_ATTR(, ctx_restore_mid_bb);
672 CONFIGFS_ATTR(, ctx_restore_post_bb);
673 CONFIGFS_ATTR(, enable_psmi);
674 CONFIGFS_ATTR(, engines_allowed);
675 CONFIGFS_ATTR(, survivability_mode);
676 
677 static struct configfs_attribute *xe_config_device_attrs[] = {
678 	&attr_ctx_restore_mid_bb,
679 	&attr_ctx_restore_post_bb,
680 	&attr_enable_psmi,
681 	&attr_engines_allowed,
682 	&attr_survivability_mode,
683 	NULL,
684 };
685 
686 static void xe_config_device_release(struct config_item *item)
687 {
688 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
689 
690 	mutex_destroy(&dev->lock);
691 
692 	kfree(dev->config.ctx_restore_post_bb[0].cs);
693 	kfree(dev);
694 }
695 
696 static struct configfs_item_operations xe_config_device_ops = {
697 	.release	= xe_config_device_release,
698 };
699 
700 static bool xe_config_device_is_visible(struct config_item *item,
701 					struct configfs_attribute *attr, int n)
702 {
703 	struct xe_config_group_device *dev = to_xe_config_group_device(item);
704 
705 	if (attr == &attr_survivability_mode) {
706 		if (!dev->desc->is_dgfx || dev->desc->platform < XE_BATTLEMAGE)
707 			return false;
708 	}
709 
710 	return true;
711 }
712 
713 static struct configfs_group_operations xe_config_device_group_ops = {
714 	.is_visible	= xe_config_device_is_visible,
715 };
716 
717 static const struct config_item_type xe_config_device_type = {
718 	.ct_item_ops	= &xe_config_device_ops,
719 	.ct_group_ops	= &xe_config_device_group_ops,
720 	.ct_attrs	= xe_config_device_attrs,
721 	.ct_owner	= THIS_MODULE,
722 };
723 
724 static const struct xe_device_desc *xe_match_desc(struct pci_dev *pdev)
725 {
726 	struct device_driver *driver = driver_find("xe", &pci_bus_type);
727 	struct pci_driver *drv = to_pci_driver(driver);
728 	const struct pci_device_id *ids = drv ? drv->id_table : NULL;
729 	const struct pci_device_id *found = pci_match_id(ids, pdev);
730 
731 	return found ? (const void *)found->driver_data : NULL;
732 }
733 
734 static struct pci_dev *get_physfn_instead(struct pci_dev *virtfn)
735 {
736 	struct pci_dev *physfn = pci_physfn(virtfn);
737 
738 	pci_dev_get(physfn);
739 	pci_dev_put(virtfn);
740 	return physfn;
741 }
742 
743 static struct config_group *xe_config_make_device_group(struct config_group *group,
744 							const char *name)
745 {
746 	unsigned int domain, bus, slot, function;
747 	struct xe_config_group_device *dev;
748 	const struct xe_device_desc *match;
749 	struct pci_dev *pdev;
750 	char canonical[16];
751 	int vfnumber = 0;
752 	int ret;
753 
754 	ret = sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function);
755 	if (ret != 4)
756 		return ERR_PTR(-EINVAL);
757 
758 	ret = scnprintf(canonical, sizeof(canonical), "%04x:%02x:%02x.%d", domain, bus,
759 			PCI_SLOT(PCI_DEVFN(slot, function)),
760 			PCI_FUNC(PCI_DEVFN(slot, function)));
761 	if (ret != 12 || strcmp(name, canonical))
762 		return ERR_PTR(-EINVAL);
763 
764 	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
765 	if (!pdev && function)
766 		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, 0));
767 	if (!pdev && slot)
768 		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(0, 0));
769 	if (!pdev)
770 		return ERR_PTR(-ENODEV);
771 
772 	if (PCI_DEVFN(slot, function) != pdev->devfn) {
773 		pdev = get_physfn_instead(pdev);
774 		vfnumber = PCI_DEVFN(slot, function) - pdev->devfn;
775 		if (!dev_is_pf(&pdev->dev) || vfnumber > pci_sriov_get_totalvfs(pdev)) {
776 			pci_dev_put(pdev);
777 			return ERR_PTR(-ENODEV);
778 		}
779 	}
780 
781 	match = xe_match_desc(pdev);
782 	if (match && vfnumber && !match->has_sriov) {
783 		pci_info(pdev, "xe driver does not support VFs on this device\n");
784 		match = NULL;
785 	} else if (!match) {
786 		pci_info(pdev, "xe driver does not support configuration of this device\n");
787 	}
788 
789 	pci_dev_put(pdev);
790 
791 	if (!match)
792 		return ERR_PTR(-ENOENT);
793 
794 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
795 	if (!dev)
796 		return ERR_PTR(-ENOMEM);
797 
798 	dev->desc = match;
799 	set_device_defaults(&dev->config);
800 
801 	config_group_init_type_name(&dev->group, name, &xe_config_device_type);
802 
803 	mutex_init(&dev->lock);
804 
805 	return &dev->group;
806 }
807 
808 static struct configfs_group_operations xe_config_group_ops = {
809 	.make_group	= xe_config_make_device_group,
810 };
811 
812 static const struct config_item_type xe_configfs_type = {
813 	.ct_group_ops	= &xe_config_group_ops,
814 	.ct_owner	= THIS_MODULE,
815 };
816 
817 static struct configfs_subsystem xe_configfs = {
818 	.su_group = {
819 		.cg_item = {
820 			.ci_namebuf = "xe",
821 			.ci_type = &xe_configfs_type,
822 		},
823 	},
824 };
825 
826 static struct xe_config_group_device *find_xe_config_group_device(struct pci_dev *pdev)
827 {
828 	struct config_item *item;
829 
830 	mutex_lock(&xe_configfs.su_mutex);
831 	item = config_group_find_item(&xe_configfs.su_group, pci_name(pdev));
832 	mutex_unlock(&xe_configfs.su_mutex);
833 
834 	if (!item)
835 		return NULL;
836 
837 	return to_xe_config_group_device(item);
838 }
839 
840 static void dump_custom_dev_config(struct pci_dev *pdev,
841 				   struct xe_config_group_device *dev)
842 {
843 #define PRI_CUSTOM_ATTR(fmt_, attr_) do { \
844 		if (dev->config.attr_ != device_defaults.attr_) \
845 			pci_info(pdev, "configfs: " __stringify(attr_) " = " fmt_ "\n", \
846 				 dev->config.attr_); \
847 	} while (0)
848 
849 	PRI_CUSTOM_ATTR("%llx", engines_allowed);
850 	PRI_CUSTOM_ATTR("%d", enable_psmi);
851 	PRI_CUSTOM_ATTR("%d", survivability_mode);
852 
853 #undef PRI_CUSTOM_ATTR
854 }
855 
856 /**
857  * xe_configfs_check_device() - Test if device was configured by configfs
858  * @pdev: the &pci_dev device to test
859  *
 860  * Try to find the configfs group that belongs to the specified pci device
 861  * and print a diagnostic message if the settings differ from the defaults.
862  */
863 void xe_configfs_check_device(struct pci_dev *pdev)
864 {
865 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
866 
867 	if (!dev)
868 		return;
869 
870 	/* memcmp here is safe as both are zero-initialized */
871 	if (memcmp(&dev->config, &device_defaults, sizeof(dev->config))) {
872 		pci_info(pdev, "Found custom settings in configfs\n");
873 		dump_custom_dev_config(pdev, dev);
874 	}
875 
876 	config_group_put(&dev->group);
877 }
878 
879 /**
 880  * xe_configfs_get_survivability_mode() - get configfs survivability mode attribute
881  * @pdev: pci device
882  *
883  * Return: survivability_mode attribute in configfs
884  */
885 bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
886 {
887 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
888 	bool mode;
889 
890 	if (!dev)
891 		return device_defaults.survivability_mode;
892 
893 	mode = dev->config.survivability_mode;
894 	config_group_put(&dev->group);
895 
896 	return mode;
897 }
898 
899 /**
 900  * xe_configfs_get_engines_allowed() - get allowed engine mask from configfs
901  * @pdev: pci device
902  *
903  * Return: engine mask with allowed engines set in configfs
904  */
905 u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
906 {
907 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
908 	u64 engines_allowed;
909 
910 	if (!dev)
911 		return device_defaults.engines_allowed;
912 
913 	engines_allowed = dev->config.engines_allowed;
914 	config_group_put(&dev->group);
915 
916 	return engines_allowed;
917 }
918 
919 /**
 920  * xe_configfs_get_psmi_enabled() - get configfs enable_psmi setting
921  * @pdev: pci device
922  *
923  * Return: enable_psmi setting in configfs
924  */
925 bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev)
926 {
927 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
928 	bool ret;
929 
930 	if (!dev)
931 		return false;
932 
933 	ret = dev->config.enable_psmi;
934 	config_group_put(&dev->group);
935 
936 	return ret;
937 }
938 
939 /**
 940  * xe_configfs_get_ctx_restore_mid_bb() - get configfs ctx_restore_mid_bb setting
941  * @pdev: pci device
942  * @class: hw engine class
943  * @cs: pointer to the bb to use - only valid during probe
944  *
 945  * Return: Number of dwords used in the ctx_restore_mid_bb setting in configfs
946  */
947 u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev,
948 				       enum xe_engine_class class,
949 				       const u32 **cs)
950 {
951 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
952 	u32 len;
953 
954 	if (!dev)
955 		return 0;
956 
957 	if (cs)
958 		*cs = dev->config.ctx_restore_mid_bb[class].cs;
959 
960 	len = dev->config.ctx_restore_mid_bb[class].len;
961 	config_group_put(&dev->group);
962 
963 	return len;
964 }
965 
966 /**
 967  * xe_configfs_get_ctx_restore_post_bb() - get configfs ctx_restore_post_bb setting
968  * @pdev: pci device
969  * @class: hw engine class
970  * @cs: pointer to the bb to use - only valid during probe
971  *
 972  * Return: Number of dwords used in the ctx_restore_post_bb setting in configfs
973  */
974 u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
975 					enum xe_engine_class class,
976 					const u32 **cs)
977 {
978 	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
979 	u32 len;
980 
981 	if (!dev)
982 		return 0;
983 
984 	*cs = dev->config.ctx_restore_post_bb[class].cs;
985 	len = dev->config.ctx_restore_post_bb[class].len;
986 	config_group_put(&dev->group);
987 
988 	return len;
989 }
990 
991 int __init xe_configfs_init(void)
992 {
993 	int ret;
994 
995 	config_group_init(&xe_configfs.su_group);
996 	mutex_init(&xe_configfs.su_mutex);
997 	ret = configfs_register_subsystem(&xe_configfs);
998 	if (ret) {
999 		mutex_destroy(&xe_configfs.su_mutex);
1000 		return ret;
1001 	}
1002 
1003 	return 0;
1004 }
1005 
1006 void xe_configfs_exit(void)
1007 {
1008 	configfs_unregister_subsystem(&xe_configfs);
1009 	mutex_destroy(&xe_configfs.su_mutex);
1010 }
1011