// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/configfs.h>
#include <linux/find.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>

#include "xe_configfs.h"
#include "xe_module.h"

#include "xe_hw_engine_types.h"

/**
 * DOC: Xe Configfs
 *
 * Overview
 * =========
 *
 * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
 * configfs subsystem called ``'xe'`` that creates a directory in the mounted
 * configfs directory. The user can create devices under this directory and
 * configure them as necessary. See Documentation/filesystems/configfs.rst for
 * more information about how configfs works.
 *
 * Create devices
 * ===============
 *
 * In order to create a device, the user has to create a directory inside ``'xe'``::
 *
 *	mkdir /sys/kernel/config/xe/0000:03:00.0/
 *
 * Every device created is populated by the driver with entries that can be
 * used to configure it::
 *
 *	/sys/kernel/config/xe/
 *	.. 0000:03:00.0/
 *	    ... survivability_mode
 *
 * Configure Attributes
 * ====================
 *
 * Survivability mode:
 * -------------------
 *
 * Enable survivability mode on supported cards. This setting only takes
 * effect when probing the device. Example to enable it::
 *
 *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
 *	# echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind  (Enters survivability mode if supported)
 *
 * Allowed engines:
 * ----------------
 *
 * Allow only a set of engine(s) to be available, disabling the other engines
 * even if they are available in hardware. This is applied after HW fuses are
 * considered on each tile.
Examples:
 *
 * Allow only one render and one copy engines, nothing else::
 *
 *	# echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *
 * Allow only compute engines and first copy engine::
 *
 *	# echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
 *
 * Note that the engine names are the per-GT hardware names. On multi-tile
 * platforms, writing ``rcs0,bcs0`` to this file would allow the first render
 * and copy engines on each tile.
 *
 * The requested configuration may not be supported by the platform and driver
 * may fail to probe. For example: if at least one copy engine is expected to be
 * available for migrations, but it's disabled. This is intended for debugging
 * purposes only.
 *
 * Remove devices
 * ==============
 *
 * The created device directories can be removed using ``rmdir``::
 *
 *	rmdir /sys/kernel/config/xe/0000:03:00.0/
 */

/*
 * Per-device configfs state: one instance is allocated for each directory
 * the user creates under /sys/kernel/config/xe/.
 */
struct xe_config_device {
	struct config_group group;

	/* enter survivability mode on next probe of this device */
	bool survivability_mode;
	/* bitmask of hardware engines the driver may expose (per GT) */
	u64 engines_allowed;

	/* protects attributes */
	struct mutex lock;
};

/* Maps an engine-class name prefix ("rcs", ...) to its XE_HW_ENGINE_* mask */
struct engine_info {
	const char *cls;
	u64 mask;
};

/* Some helpful macros to aid on the sizing of buffer allocation when parsing */
#define MAX_ENGINE_CLASS_CHARS 5
#define MAX_ENGINE_INSTANCE_CHARS 2

/* Longest class name is "gsccs" (MAX_ENGINE_CLASS_CHARS bytes) */
static const struct engine_info engine_info[] = {
	{ .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK },
	{ .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK },
	{ .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK },
	{ .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK },
	{ .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK },
	{ .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK },
};

/* Resolve a configfs item back to its containing xe_config_device */
static struct xe_config_device *to_xe_config_device(struct config_item *item)
{
	return container_of(to_config_group(item), struct xe_config_device, group);
}

/* Show "0\n" or "1\n" for the survivability_mode attribute */
static ssize_t survivability_mode_show(struct config_item *item,
				       char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);

	return sprintf(page, "%d\n", dev->survivability_mode);
}

/*
 * Parse a boolean into survivability_mode. The new value only takes effect
 * the next time the device is probed (see DOC above).
 */
static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
{
	struct xe_config_device *dev = to_xe_config_device(item);
	bool survivability_mode;
	int ret;

	ret = kstrtobool(page, &survivability_mode);
	if (ret)
		return ret;

	mutex_lock(&dev->lock);
	dev->survivability_mode = survivability_mode;
	mutex_unlock(&dev->lock);

	return len;
}

/*
 * Print the allowed-engine mask back in the same pattern syntax used for
 * writes: "cls*" when every engine of a class is allowed, otherwise one
 * "cls<instance>" line per allowed engine.
 */
static ssize_t engines_allowed_show(struct config_item *item, char *page)
{
	struct xe_config_device *dev = to_xe_config_device(item);
	char *p = page;

	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		u64 mask = engine_info[i].mask;

		if ((dev->engines_allowed & mask) == mask) {
			p += sprintf(p, "%s*\n", engine_info[i].cls);
		} else if (mask & dev->engines_allowed) {
			/* bit0 = position of this class's first engine */
			u16 bit0 = __ffs64(mask), bit;

			mask &= dev->engines_allowed;

			/*
			 * NOTE(review): iterating a u64 via (unsigned long *)
			 * assumes 64-bit unsigned long; fine on the platforms
			 * xe builds for, but not portable to 32-bit — confirm
			 * if build coverage ever widens.
			 */
			for_each_set_bit(bit, (const unsigned long *)&mask, 64)
				p += sprintf(p, "%s%u\n", engine_info[i].cls,
					     bit - bit0);
		}
	}

	return p - page;
}

/*
 * Translate one engine pattern ("rcs0", "ccs*", ...) into a mask of
 * XE_HW_ENGINE_* bits in *mask. Returns false for an unknown class, a
 * malformed instance number, or an instance beyond the class's bit range.
 */
static bool lookup_engine_mask(const char *pattern, u64 *mask)
{
	for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
		u8 instance;
		u16 bit;

		if (!str_has_prefix(pattern, engine_info[i].cls))
			continue;

		pattern += strlen(engine_info[i].cls);

		/* "<cls>*" selects every engine of the class */
		if (!strcmp(pattern, "*")) {
			*mask = engine_info[i].mask;
			return true;
		}

		if (kstrtou8(pattern, 10, &instance))
			return false;

		/* fls64() is 1-based, so valid bits are [__ffs64, fls64) */
		bit = __ffs64(engine_info[i].mask) + instance;
		if (bit >= fls64(engine_info[i].mask))
			return false;

		*mask = BIT_ULL(bit);
		return true;
	}

	return false;
}

/*
 * Parse a ','/'\n'-separated list of engine patterns and replace the whole
 * allowed-engine mask under the device lock. Any invalid pattern rejects
 * the entire write with -EINVAL.
 */
static ssize_t engines_allowed_store(struct config_item *item, const char *page,
				     size_t len)
{
202 struct xe_config_device *dev = to_xe_config_device(item); 203 size_t patternlen, p; 204 u64 mask, val = 0; 205 206 for (p = 0; p < len; p += patternlen + 1) { 207 char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1]; 208 209 patternlen = strcspn(page + p, ",\n"); 210 if (patternlen >= sizeof(buf)) 211 return -EINVAL; 212 213 memcpy(buf, page + p, patternlen); 214 buf[patternlen] = '\0'; 215 216 if (!lookup_engine_mask(buf, &mask)) 217 return -EINVAL; 218 219 val |= mask; 220 } 221 222 mutex_lock(&dev->lock); 223 dev->engines_allowed = val; 224 mutex_unlock(&dev->lock); 225 226 return len; 227 } 228 229 CONFIGFS_ATTR(, survivability_mode); 230 CONFIGFS_ATTR(, engines_allowed); 231 232 static struct configfs_attribute *xe_config_device_attrs[] = { 233 &attr_survivability_mode, 234 &attr_engines_allowed, 235 NULL, 236 }; 237 238 static void xe_config_device_release(struct config_item *item) 239 { 240 struct xe_config_device *dev = to_xe_config_device(item); 241 242 mutex_destroy(&dev->lock); 243 kfree(dev); 244 } 245 246 static struct configfs_item_operations xe_config_device_ops = { 247 .release = xe_config_device_release, 248 }; 249 250 static const struct config_item_type xe_config_device_type = { 251 .ct_item_ops = &xe_config_device_ops, 252 .ct_attrs = xe_config_device_attrs, 253 .ct_owner = THIS_MODULE, 254 }; 255 256 static struct config_group *xe_config_make_device_group(struct config_group *group, 257 const char *name) 258 { 259 unsigned int domain, bus, slot, function; 260 struct xe_config_device *dev; 261 struct pci_dev *pdev; 262 int ret; 263 264 ret = sscanf(name, "%04x:%02x:%02x.%x", &domain, &bus, &slot, &function); 265 if (ret != 4) 266 return ERR_PTR(-EINVAL); 267 268 pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function)); 269 if (!pdev) 270 return ERR_PTR(-EINVAL); 271 272 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 273 if (!dev) 274 return ERR_PTR(-ENOMEM); 275 276 /* Default values */ 277 dev->engines_allowed = 
U64_MAX; 278 279 config_group_init_type_name(&dev->group, name, &xe_config_device_type); 280 281 mutex_init(&dev->lock); 282 283 return &dev->group; 284 } 285 286 static struct configfs_group_operations xe_config_device_group_ops = { 287 .make_group = xe_config_make_device_group, 288 }; 289 290 static const struct config_item_type xe_configfs_type = { 291 .ct_group_ops = &xe_config_device_group_ops, 292 .ct_owner = THIS_MODULE, 293 }; 294 295 static struct configfs_subsystem xe_configfs = { 296 .su_group = { 297 .cg_item = { 298 .ci_namebuf = "xe", 299 .ci_type = &xe_configfs_type, 300 }, 301 }, 302 }; 303 304 static struct xe_config_device *configfs_find_group(struct pci_dev *pdev) 305 { 306 struct config_item *item; 307 char name[64]; 308 309 snprintf(name, sizeof(name), "%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus), 310 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 311 312 mutex_lock(&xe_configfs.su_mutex); 313 item = config_group_find_item(&xe_configfs.su_group, name); 314 mutex_unlock(&xe_configfs.su_mutex); 315 316 if (!item) 317 return NULL; 318 319 return to_xe_config_device(item); 320 } 321 322 /** 323 * xe_configfs_get_survivability_mode - get configfs survivability mode attribute 324 * @pdev: pci device 325 * 326 * find the configfs group that belongs to the pci device and return 327 * the survivability mode attribute 328 * 329 * Return: survivability mode if config group is found, false otherwise 330 */ 331 bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) 332 { 333 struct xe_config_device *dev = configfs_find_group(pdev); 334 bool mode; 335 336 if (!dev) 337 return false; 338 339 mode = dev->survivability_mode; 340 config_item_put(&dev->group.cg_item); 341 342 return mode; 343 } 344 345 /** 346 * xe_configfs_clear_survivability_mode - clear configfs survivability mode attribute 347 * @pdev: pci device 348 * 349 * find the configfs group that belongs to the pci device and clear survivability 350 * mode attribute 351 */ 
352 void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) 353 { 354 struct xe_config_device *dev = configfs_find_group(pdev); 355 356 if (!dev) 357 return; 358 359 mutex_lock(&dev->lock); 360 dev->survivability_mode = 0; 361 mutex_unlock(&dev->lock); 362 363 config_item_put(&dev->group.cg_item); 364 } 365 366 /** 367 * xe_configfs_get_engines_allowed - get engine allowed mask from configfs 368 * @pdev: pci device 369 * 370 * Find the configfs group that belongs to the pci device and return 371 * the mask of engines allowed to be used. 372 * 373 * Return: engine mask with allowed engines 374 */ 375 u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) 376 { 377 struct xe_config_device *dev = configfs_find_group(pdev); 378 u64 engines_allowed; 379 380 if (!dev) 381 return U64_MAX; 382 383 engines_allowed = dev->engines_allowed; 384 config_item_put(&dev->group.cg_item); 385 386 return engines_allowed; 387 } 388 389 int __init xe_configfs_init(void) 390 { 391 struct config_group *root = &xe_configfs.su_group; 392 int ret; 393 394 config_group_init(root); 395 mutex_init(&xe_configfs.su_mutex); 396 ret = configfs_register_subsystem(&xe_configfs); 397 if (ret) { 398 pr_err("Error %d while registering %s subsystem\n", 399 ret, root->cg_item.ci_namebuf); 400 return ret; 401 } 402 403 return 0; 404 } 405 406 void __exit xe_configfs_exit(void) 407 { 408 configfs_unregister_subsystem(&xe_configfs); 409 } 410 411