// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/configfs.h>
#include <linux/find.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>

#include "xe_configfs.h"
#include "xe_module.h"

#include "xe_hw_engine_types.h"

/**
 * DOC: Xe Configfs
 *
 * Overview
 * =========
 *
 * Configfs is a filesystem-based manager of kernel objects. The XE KMD
 * registers a configfs subsystem called ``xe`` that creates a directory in
 * the mounted configfs directory. The user can create devices under this
 * directory and configure them as necessary. See
 * Documentation/filesystems/configfs.rst for more information about how
 * configfs works.
 *
 * Create devices
 * ===============
 *
 * To create a device, the user has to create a directory inside ``xe``
 * named after the PCI address of the device::
 *
 *	mkdir /sys/kernel/config/xe/0000:03:00.0/
 *
 * The name must be a valid PCI address of an existing device; otherwise the
 * ``mkdir`` fails. Every device created is populated by the driver with
 * entries that can be used to configure it::
 *
 *	/sys/kernel/config/xe/
 *		.. 0000:03:00.0/
 *			... survivability_mode
 *			... engines_allowed
 *
 * Configure Attributes
 * ====================
 *
 * Survivability mode:
 * -------------------
 *
 * Enable survivability mode on supported cards. This setting only takes
 * effect when probing the device. Example to enable it::
 *
 *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
 *	# echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind  (Enters survivability mode if supported)
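 *
 * The current value can be read back, e.g.::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/survivability_mode
 *	1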
 *
 * Allowed engines:
 * ----------------
 *
 * Allow only a set of engines to be available, disabling the other engines
 * even if they are present in hardware. This is applied after HW fuses are
 * considered on each tile. Examples:
 *
 * Allow only one render and one copy engine, nothing else::
 *
 *	# echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *
 * Allow only the compute engines and the first copy engine::
 *
 *	# echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *
 * Note that the engine names are the per-GT hardware names. On multi-tile
 * platforms, writing ``rcs0,bcs0`` to this file would allow the first render
 * and copy engines on each tile.
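 *
 * Reading the file back returns the resulting configuration, one engine or
 * engine class per line. Continuing the ``ccs*,bcs0`` example above::
 *
 *	# cat /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *	bcs0
 *	ccs*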
 *
 * The requested configuration may not be supported by the platform, in which
 * case the driver may fail to probe: for example, when at least one copy
 * engine is expected to be available for migrations but all copy engines are
 * disabled. This is intended for debugging purposes only.
 *
 * Remove devices
 * ==============
 *
 * The created device directories can be removed using ``rmdir``::
 *
 *	rmdir /sys/kernel/config/xe/0000:03:00.0/
 */

struct xe_config_device {
	struct config_group group;

	bool survivability_mode;
	u64 engines_allowed;

	/* protects attributes */
	struct mutex lock;
};

struct engine_info {
	const char *cls;
	u64 mask;
};

/* Helper macros to aid in sizing the buffer allocation when parsing */
#define MAX_ENGINE_CLASS_CHARS 5
#define MAX_ENGINE_INSTANCE_CHARS 2

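/* Mapping of user-visible engine class names to hardware engine masks */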
static const struct engine_info engine_info[] = {
	{ .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK },
	{ .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK },
	{ .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK },
	{ .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK },
	{ .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK },
	{ .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK },
};

static struct xe_config_device *to_xe_config_device(struct config_item *item)
{
	return container_of(to_config_group(item), struct xe_config_device, group);
}

static ssize_t survivability_mode_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);

	return sprintf(page, "%d\n", dev->survivability_mode);
}

static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_device *dev = to_xe_config_device(item);
	bool survivability_mode;
	int ret;

	ret = kstrtobool(page, &survivability_mode);
	if (ret)
		return ret;

	mutex_lock(&dev->lock);
	dev->survivability_mode = survivability_mode;
	mutex_unlock(&dev->lock);

	return len;
}

static ssize_t engines_allowed_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);
	char *p = page;

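	/*
	 * Print one line per allowed engine: a fully-allowed class as
	 * "<class>*", individual instances as "<class><instance>".
	 */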
	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		u64 mask = engine_info[i].mask;

		if ((dev->engines_allowed & mask) == mask) {
			p += sprintf(p, "%s*\n", engine_info[i].cls);
		} else if (mask & dev->engines_allowed) {
			u16 bit0 = __ffs64(mask), bit;

			mask &= dev->engines_allowed;

			for_each_set_bit(bit, (const unsigned long *)&mask, 64)
				p += sprintf(p, "%s%u\n", engine_info[i].cls,
					     bit - bit0);
		}
	}

	return p - page;
}

static bool lookup_engine_mask(const char *pattern, u64 *mask)
{
	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		u8 instance;
		u16 bit;

		if (!str_has_prefix(pattern, engine_info[i].cls))
			continue;

		pattern += strlen(engine_info[i].cls);

		if (!strcmp(pattern, "*")) {
			*mask = engine_info[i].mask;
			return true;
		}

		if (kstrtou8(pattern, 10, &instance))
			return false;

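		/* Map the instance to its bit inside the class mask and bounds-check it */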
		bit = __ffs64(engine_info[i].mask) + instance;
		if (bit >= fls64(engine_info[i].mask))
			return false;

		*mask = BIT_ULL(bit);
		return true;
	}

	return false;
}

static ssize_t engines_allowed_store(struct config_item *item, const char *page,
				     size_t len)
{
	struct xe_config_device *dev = to_xe_config_device(item);
	size_t patternlen, p;
	u64 mask, val = 0;

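	/* Parse a list of engine patterns separated by ',' or '\n', e.g. "rcs0,bcs0" */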
	for (p = 0; p < len; p += patternlen + 1) {
		char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];

		patternlen = strcspn(page + p, ",\n");
		if (patternlen >= sizeof(buf))
			return -EINVAL;

		memcpy(buf, page + p, patternlen);
		buf[patternlen] = '\0';

		if (!lookup_engine_mask(buf, &mask))
			return -EINVAL;

		val |= mask;
	}

	mutex_lock(&dev->lock);
	dev->engines_allowed = val;
	mutex_unlock(&dev->lock);

	return len;
}

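/*
 * CONFIGFS_ATTR() with an empty prefix defines attr_<name> wired to the
 * <name>_show()/<name>_store() functions above.
 */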
CONFIGFS_ATTR(, survivability_mode);
CONFIGFS_ATTR(, engines_allowed);

static struct configfs_attribute *xe_config_device_attrs[] = {
	&attr_survivability_mode,
	&attr_engines_allowed,
	NULL,
};

static void xe_config_device_release(struct config_item *item)
{
	struct xe_config_device *dev = to_xe_config_device(item);

	mutex_destroy(&dev->lock);
	kfree(dev);
}

static struct configfs_item_operations xe_config_device_ops = {
	.release	= xe_config_device_release,
};

static const struct config_item_type xe_config_device_type = {
	.ct_item_ops	= &xe_config_device_ops,
	.ct_attrs	= xe_config_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct config_group *xe_config_make_device_group(struct config_group *group,
							const char *name)
{
	unsigned int domain, bus, slot, function;
	struct xe_config_device *dev;
	struct pci_dev *pdev;
	int ret;

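	/* The directory name must be a full PCI address, e.g. 0000:03:00.0 */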
	ret = sscanf(name, "%04x:%02x:%02x.%x", &domain, &bus, &slot, &function);
	if (ret != 4)
		return ERR_PTR(-EINVAL);

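	/* Only check that the PCI device exists; don't hold a reference to it */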
	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
	if (!pdev)
		return ERR_PTR(-ENODEV);
	pci_dev_put(pdev);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/* Default values */
	dev->engines_allowed = U64_MAX;

	config_group_init_type_name(&dev->group, name, &xe_config_device_type);

	mutex_init(&dev->lock);

	return &dev->group;
}

static struct configfs_group_operations xe_config_device_group_ops = {
	.make_group	= xe_config_make_device_group,
};

static const struct config_item_type xe_configfs_type = {
	.ct_group_ops	= &xe_config_device_group_ops,
	.ct_owner	= THIS_MODULE,
};

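/* Subsystem root; shows up as /sys/kernel/config/xe once registered */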
static struct configfs_subsystem xe_configfs = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "xe",
			.ci_type = &xe_configfs_type,
		},
	},
};

static struct xe_config_device *configfs_find_group(struct pci_dev *pdev)
{
	struct config_item *item;
	char name[64];

	snprintf(name, sizeof(name), "%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus),
		 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

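	/*
	 * config_group_find_item() returns the item with a reference held;
	 * callers are responsible for dropping it with config_item_put().
	 */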
	mutex_lock(&xe_configfs.su_mutex);
	item = config_group_find_item(&xe_configfs.su_group, name);
	mutex_unlock(&xe_configfs.su_mutex);

	if (!item)
		return NULL;

	return to_xe_config_device(item);
}

/**
 * xe_configfs_get_survivability_mode - get configfs survivability mode attribute
 * @pdev: pci device
 *
 * Find the configfs group that belongs to the PCI device and return the
 * survivability mode attribute.
 *
 * Return: survivability mode if the config group is found, false otherwise
 */
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
{
	struct xe_config_device *dev = configfs_find_group(pdev);
	bool mode;

	if (!dev)
		return false;

	mode = dev->survivability_mode;
	config_item_put(&dev->group.cg_item);

	return mode;
}

/**
 * xe_configfs_clear_survivability_mode - clear configfs survivability mode attribute
 * @pdev: pci device
 *
 * Find the configfs group that belongs to the PCI device and clear the
 * survivability mode attribute.
 */
void xe_configfs_clear_survivability_mode(struct pci_dev *pdev)
{
	struct xe_config_device *dev = configfs_find_group(pdev);

	if (!dev)
		return;

	mutex_lock(&dev->lock);
	dev->survivability_mode = 0;
	mutex_unlock(&dev->lock);

	config_item_put(&dev->group.cg_item);
}

/**
 * xe_configfs_get_engines_allowed - get engine allowed mask from configfs
 * @pdev: pci device
 *
 * Find the configfs group that belongs to the PCI device and return
 * the mask of engines allowed to be used.
 *
 * Return: mask of allowed engines, or U64_MAX if the device has no config
 * group
 */
u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
{
	struct xe_config_device *dev = configfs_find_group(pdev);
	u64 engines_allowed;

	if (!dev)
		return U64_MAX;

	engines_allowed = dev->engines_allowed;
	config_item_put(&dev->group.cg_item);

	return engines_allowed;
}

int __init xe_configfs_init(void)
{
	struct config_group *root = &xe_configfs.su_group;
	int ret;

	config_group_init(root);
	mutex_init(&xe_configfs.su_mutex);
	ret = configfs_register_subsystem(&xe_configfs);
	if (ret) {
		pr_err("Error %d while registering %s subsystem\n",
		       ret, root->cg_item.ci_namebuf);
		return ret;
	}

	return 0;
}

void __exit xe_configfs_exit(void)
{
	configfs_unregister_subsystem(&xe_configfs);
}