// SPDX-License-Identifier: GPL-2.0
/*
 * class.c - basic device class management
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2003-2004 Greg Kroah-Hartman
 * Copyright (c) 2003-2004 IBM Corp.
 */

#include <linux/device/class.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include "base.h"

/* /sys/class */
static struct kset *class_kset;

#define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)

/**
 * class_to_subsys - Turn a struct class into a struct subsys_private
 *
 * @class: pointer to the struct class to look up
 *
 * The driver core internals need to work on the subsys_private structure, not
 * the external struct class pointer. This function walks the list of
 * registered classes in the system, finds the matching one, and returns the
 * internal struct subsys_private that relates to that class.
 *
 * Note, the reference count of the return value is INCREMENTED if it is not
 * NULL. A call to subsys_put() must be done when finished with the pointer in
 * order for it to be properly freed.
 */
static struct subsys_private *class_to_subsys(const struct class *class)
{
	struct subsys_private *sp = NULL;
	struct kobject *kobj;

	if (!class || !class_kset)
		return NULL;

	spin_lock(&class_kset->list_lock);

	if (list_empty(&class_kset->list))
		goto done;

	list_for_each_entry(kobj, &class_kset->list, entry) {
		struct kset *kset = container_of(kobj, struct kset, kobj);

		sp = container_of_const(kset, struct subsys_private, subsys);
		if (sp->class == class)
			goto done;
	}
	sp = NULL;
done:
	sp = subsys_get(sp);
	spin_unlock(&class_kset->list_lock);
	return sp;
}

static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
{
	struct class_attribute *class_attr = to_class_attr(attr);
	struct subsys_private *cp = to_subsys_private(kobj);
	ssize_t ret = -EIO;

	if (class_attr->show)
		ret = class_attr->show(cp->class, class_attr, buf);
	return ret;
}

static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	struct class_attribute *class_attr = to_class_attr(attr);
	struct subsys_private *cp = to_subsys_private(kobj);
	ssize_t ret = -EIO;

	if (class_attr->store)
		ret = class_attr->store(cp->class, class_attr, buf, count);
	return ret;
}

static void class_release(struct kobject *kobj)
{
	struct subsys_private *cp = to_subsys_private(kobj);
	struct class *class = cp->class;

	pr_debug("class '%s': release.\n", class->name);

	class->p = NULL;

	if (class->class_release)
		class->class_release(class);
	else
		pr_debug("class '%s' does not have a release() function, be careful\n",
			 class->name);

	kfree(cp);
}

static const struct kobj_ns_type_operations *class_child_ns_type(const struct kobject *kobj)
{
	const struct subsys_private *cp = to_subsys_private(kobj);
	struct class *class = cp->class;

	return class->ns_type;
}

static const struct sysfs_ops class_sysfs_ops = {
	.show	= class_attr_show,
	.store	= class_attr_store,
};

static const struct kobj_type class_ktype = {
	.sysfs_ops	= &class_sysfs_ops,
	.release	= class_release,
	.child_ns_type	= class_child_ns_type,
};

int class_create_file_ns(const struct class *cls, const struct class_attribute *attr,
			 const void *ns)
{
	struct subsys_private *sp = class_to_subsys(cls);
	int error;

	if (!sp)
		return -EINVAL;

	error = sysfs_create_file_ns(&sp->subsys.kobj, &attr->attr, ns);
	subsys_put(sp);

	return error;
}
EXPORT_SYMBOL_GPL(class_create_file_ns);

void class_remove_file_ns(const struct class *cls, const struct class_attribute *attr,
			  const void *ns)
{
	struct subsys_private *sp = class_to_subsys(cls);

	if (!sp)
		return;

	sysfs_remove_file_ns(&sp->subsys.kobj, &attr->attr, ns);
	subsys_put(sp);
}
EXPORT_SYMBOL_GPL(class_remove_file_ns);
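/*
 * Example (illustrative sketch, not part of this file): how a class
 * attribute ends up in the class_attr_show()/class_attr_store() dispatch
 * above. The "foo"/"version" names are hypothetical.
 *
 *	static ssize_t version_show(const struct class *cls,
 *				    const struct class_attribute *attr,
 *				    char *buf)
 *	{
 *		return sysfs_emit(buf, "1.0\n");
 *	}
 *	static CLASS_ATTR_RO(version);
 *
 * Once foo_class is registered, the attribute is created in the default
 * namespace (ns == NULL) with:
 *
 *	err = class_create_file_ns(foo_class, &class_attr_version, NULL);
 *
 * and removed again with class_remove_file_ns(foo_class,
 * &class_attr_version, NULL).
 */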
static struct device *klist_class_to_dev(struct klist_node *n)
{
	struct device_private *p = to_device_private_class(n);
	return p->device;
}

static void klist_class_dev_get(struct klist_node *n)
{
	struct device *dev = klist_class_to_dev(n);

	get_device(dev);
}

static void klist_class_dev_put(struct klist_node *n)
{
	struct device *dev = klist_class_to_dev(n);

	put_device(dev);
}

int class_register(struct class *cls)
{
	struct subsys_private *cp;
	struct lock_class_key *key;
	int error;

	pr_debug("device class '%s': registering\n", cls->name);

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;
	klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
	INIT_LIST_HEAD(&cp->interfaces);
	kset_init(&cp->glue_dirs);
	key = &cp->lock_key;
	lockdep_register_key(key);
	__mutex_init(&cp->mutex, "subsys mutex", key);
	error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
	if (error) {
		kfree(cp);
		return error;
	}

	/* set the default /sys/dev directory for devices of this class */
	if (!cls->dev_kobj)
		cls->dev_kobj = sysfs_dev_char_kobj;

	cp->subsys.kobj.kset = class_kset;
	cp->subsys.kobj.ktype = &class_ktype;
	cp->class = cls;
	cls->p = cp;

	error = kset_register(&cp->subsys);
	if (error)
		goto err_out;

	error = sysfs_create_groups(&cp->subsys.kobj, cls->class_groups);
	if (error) {
		kobject_del(&cp->subsys.kobj);
		kfree_const(cp->subsys.kobj.name);
		goto err_out;
	}
	return 0;

err_out:
	kfree(cp);
	cls->p = NULL;
	return error;
}
EXPORT_SYMBOL_GPL(class_register);

void class_unregister(const struct class *cls)
{
	struct subsys_private *sp = class_to_subsys(cls);

	if (!sp)
		return;

	pr_debug("device class '%s': unregistering\n", cls->name);

	sysfs_remove_groups(&sp->subsys.kobj, cls->class_groups);
	kset_unregister(&sp->subsys);
	subsys_put(sp);
}
EXPORT_SYMBOL_GPL(class_unregister);
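/*
 * Example (illustrative sketch, not part of this file): registering a
 * statically allocated class, which is what class_register() and
 * class_unregister() are meant for. The "foo" names are hypothetical;
 * an empty class_release callback avoids the pr_debug() complaint
 * emitted by class_release() above for statically allocated classes.
 *
 *	static void foo_class_release(struct class *cls)
 *	{
 *		// nothing to free, foo_class is statically allocated
 *	}
 *
 *	static struct class foo_class = {
 *		.name		= "foo",
 *		.class_release	= foo_class_release,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return class_register(&foo_class);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		class_unregister(&foo_class);
 *	}
 */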
static void class_create_release(struct class *cls)
{
	pr_debug("%s called for %s\n", __func__, cls->name);
	kfree(cls);
}

/**
 * class_create - create a struct class structure
 * @name: pointer to a string for the name of this class.
 *
 * This is used to create a struct class pointer that can then be used
 * in calls to device_create().
 *
 * Returns &struct class pointer on success, or ERR_PTR() on error.
 *
 * Note, the pointer created here is to be destroyed when finished by
 * making a call to class_destroy().
 */
struct class *class_create(const char *name)
{
	struct class *cls;
	int retval;

	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
	if (!cls) {
		retval = -ENOMEM;
		goto error;
	}

	cls->name = name;
	cls->class_release = class_create_release;

	retval = class_register(cls);
	if (retval)
		goto error;

	return cls;

error:
	kfree(cls);
	return ERR_PTR(retval);
}
EXPORT_SYMBOL_GPL(class_create);

/**
 * class_destroy - destroys a struct class structure
 * @cls: pointer to the struct class that is to be destroyed
 *
 * Note, the pointer to be destroyed must have been created with a call
 * to class_create().
 */
void class_destroy(const struct class *cls)
{
	if (IS_ERR_OR_NULL(cls))
		return;

	class_unregister(cls);
}
EXPORT_SYMBOL_GPL(class_destroy);
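/*
 * Example (illustrative sketch, not part of this file): the dynamic
 * class_create()/class_destroy() pair, typically combined with
 * device_create()/device_destroy(). The "foo" names are hypothetical and
 * foo_devt is assumed to have been allocated elsewhere.
 *
 *	static struct class *foo_class;
 *	static dev_t foo_devt;
 *
 *	static int __init foo_init(void)
 *	{
 *		struct device *dev;
 *
 *		foo_class = class_create("foo");
 *		if (IS_ERR(foo_class))
 *			return PTR_ERR(foo_class);
 *
 *		dev = device_create(foo_class, NULL, foo_devt, NULL, "foo0");
 *		if (IS_ERR(dev)) {
 *			class_destroy(foo_class);
 *			return PTR_ERR(dev);
 *		}
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		device_destroy(foo_class, foo_devt);
 *		class_destroy(foo_class);
 *	}
 */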
/**
 * class_dev_iter_init - initialize class device iterator
 * @iter: class iterator to initialize
 * @class: the class we want to iterate over
 * @start: the device to start iterating from, if any
 * @type: device_type of the devices to iterate over, NULL for all
 *
 * Initialize class iterator @iter such that it iterates over devices
 * of @class. If @start is set, the list iteration will start there,
 * otherwise if it is NULL, the iteration starts at the beginning of
 * the list.
 */
void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
			 const struct device *start, const struct device_type *type)
{
	struct subsys_private *sp = class_to_subsys(class);
	struct klist_node *start_knode = NULL;

	if (!sp)
		return;

	if (start)
		start_knode = &start->p->knode_class;
	klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
	iter->type = type;
}
EXPORT_SYMBOL_GPL(class_dev_iter_init);

/**
 * class_dev_iter_next - iterate to the next device
 * @iter: class iterator to proceed
 *
 * Proceed @iter to the next device and return it. Returns NULL if
 * iteration is complete.
 *
 * The returned device is referenced and won't be released until the
 * iterator is advanced to the next device or exited. The caller is
 * free to do whatever it wants to do with the device including
 * calling back into class code.
 */
struct device *class_dev_iter_next(struct class_dev_iter *iter)
{
	struct klist_node *knode;
	struct device *dev;

	while (1) {
		knode = klist_next(&iter->ki);
		if (!knode)
			return NULL;
		dev = klist_class_to_dev(knode);
		if (!iter->type || iter->type == dev->type)
			return dev;
	}
}
EXPORT_SYMBOL_GPL(class_dev_iter_next);

/**
 * class_dev_iter_exit - finish iteration
 * @iter: class iterator to finish
 *
 * Finish an iteration. Always call this function after iteration is
 * complete whether the iteration ran till the end or not.
 */
void class_dev_iter_exit(struct class_dev_iter *iter)
{
	klist_iter_exit(&iter->ki);
}
EXPORT_SYMBOL_GPL(class_dev_iter_exit);
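/*
 * Example (illustrative sketch, not part of this file): open-coded
 * iteration with the class_dev_iter_*() helpers above. "foo_class" is
 * hypothetical; each device returned stays referenced until the iterator
 * advances past it or exits.
 *
 *	struct class_dev_iter iter;
 *	struct device *dev;
 *
 *	class_dev_iter_init(&iter, foo_class, NULL, NULL);
 *	while ((dev = class_dev_iter_next(&iter)))
 *		dev_info(dev, "present in class\n");
 *	class_dev_iter_exit(&iter);
 */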
/**
 * class_for_each_device - device iterator
 * @class: the class we're iterating
 * @start: the device to start with in the list, if any.
 * @data: data for the callback
 * @fn: function to be called for each device
 *
 * Iterate over @class's list of devices, and call @fn for each,
 * passing it @data. If @start is set, the list iteration will start
 * there, otherwise if it is NULL, the iteration starts at the
 * beginning of the list.
 *
 * We check the return of @fn each time. If it returns anything
 * other than 0, we break out and return that value.
 *
 * @fn is allowed to do anything including calling back into class
 * code. There's no locking restriction.
 */
int class_for_each_device(const struct class *class, const struct device *start,
			  void *data, int (*fn)(struct device *, void *))
{
	struct subsys_private *sp = class_to_subsys(class);
	struct class_dev_iter iter;
	struct device *dev;
	int error = 0;

	if (!class)
		return -EINVAL;
	if (!sp) {
		WARN(1, "%s called for class '%s' before it was initialized",
		     __func__, class->name);
		return -EINVAL;
	}

	class_dev_iter_init(&iter, class, start, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		error = fn(dev, data);
		if (error)
			break;
	}
	class_dev_iter_exit(&iter);
	subsys_put(sp);

	return error;
}
EXPORT_SYMBOL_GPL(class_for_each_device);

/**
 * class_find_device - device iterator for locating a particular device
 * @class: the class we're iterating
 * @start: the device to begin with
 * @data: data for the match function
 * @match: function to check device
 *
 * This is similar to the class_for_each_device() function above, but it
 * returns a reference to a device that is 'found' for later use, as
 * determined by the @match callback.
 *
 * The callback should return 0 if the device doesn't match and non-zero
 * if it does. If the callback returns non-zero, this function will
 * return to the caller and not iterate over any more devices.
 *
 * Note, you will need to drop the reference with put_device() after use.
 *
 * @match is allowed to do anything including calling back into class
 * code. There's no locking restriction.
 */
struct device *class_find_device(const struct class *class, const struct device *start,
				 const void *data,
				 int (*match)(struct device *, const void *))
{
	struct subsys_private *sp = class_to_subsys(class);
	struct class_dev_iter iter;
	struct device *dev;

	if (!class)
		return NULL;
	if (!sp) {
		WARN(1, "%s called for class '%s' before it was initialized",
		     __func__, class->name);
		return NULL;
	}

	class_dev_iter_init(&iter, class, start, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (match(dev, data)) {
			get_device(dev);
			break;
		}
	}
	class_dev_iter_exit(&iter);
	subsys_put(sp);

	return dev;
}
EXPORT_SYMBOL_GPL(class_find_device);
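/*
 * Example (illustrative sketch, not part of this file): typical callbacks
 * for class_for_each_device() and class_find_device() above. The names
 * are hypothetical; the match callback compares against a dev_t passed
 * in through @data.
 *
 *	static int foo_print_dev(struct device *dev, void *data)
 *	{
 *		dev_info(dev, "present\n");
 *		return 0;	// 0 means keep iterating
 *	}
 *
 *	static int foo_match_devt(struct device *dev, const void *data)
 *	{
 *		return dev->devt == *(const dev_t *)data;
 *	}
 *
 *	class_for_each_device(foo_class, NULL, NULL, foo_print_dev);
 *
 *	dev = class_find_device(foo_class, NULL, &devt, foo_match_devt);
 *	if (dev) {
 *		// use dev, then drop the reference taken above
 *		put_device(dev);
 *	}
 */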
int class_interface_register(struct class_interface *class_intf)
{
	struct subsys_private *sp;
	const struct class *parent;
	struct class_dev_iter iter;
	struct device *dev;

	if (!class_intf || !class_intf->class)
		return -ENODEV;

	parent = class_intf->class;
	sp = class_to_subsys(parent);
	if (!sp)
		return -EINVAL;

	/*
	 * Reference in sp is now incremented and will be dropped when
	 * the interface is removed in the call to class_interface_unregister()
	 */

	mutex_lock(&sp->mutex);
	list_add_tail(&class_intf->node, &sp->interfaces);
	if (class_intf->add_dev) {
		class_dev_iter_init(&iter, parent, NULL, NULL);
		while ((dev = class_dev_iter_next(&iter)))
			class_intf->add_dev(dev, class_intf);
		class_dev_iter_exit(&iter);
	}
	mutex_unlock(&sp->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(class_interface_register);

void class_interface_unregister(struct class_interface *class_intf)
{
	struct subsys_private *sp;
	struct class *parent = class_intf->class;
	struct class_dev_iter iter;
	struct device *dev;

	if (!parent)
		return;

	sp = class_to_subsys(parent);
	if (!sp)
		return;

	mutex_lock(&sp->mutex);
	list_del_init(&class_intf->node);
	if (class_intf->remove_dev) {
		class_dev_iter_init(&iter, parent, NULL, NULL);
		while ((dev = class_dev_iter_next(&iter)))
			class_intf->remove_dev(dev, class_intf);
		class_dev_iter_exit(&iter);
	}
	mutex_unlock(&sp->mutex);

	/*
	 * Decrement the reference count twice, once for the class_to_subsys()
	 * call in the start of this function, and the second one from the
	 * reference increment in class_interface_register()
	 */
	subsys_put(sp);
	subsys_put(sp);
}
EXPORT_SYMBOL_GPL(class_interface_unregister);

ssize_t show_class_attr_string(const struct class *class,
			       const struct class_attribute *attr, char *buf)
{
	struct class_attribute_string *cs;

	cs = container_of(attr, struct class_attribute_string, attr);
	return sysfs_emit(buf, "%s\n", cs->str);
}
EXPORT_SYMBOL_GPL(show_class_attr_string);

struct class_compat {
	struct kobject *kobj;
};

/**
 * class_compat_register - register a compatibility class
 * @name: the name of the class
 *
 * Compatibility classes are meant as a temporary user-space compatibility
 * workaround when converting a family of class devices to bus devices.
 */
struct class_compat *class_compat_register(const char *name)
{
	struct class_compat *cls;

	cls = kmalloc(sizeof(struct class_compat), GFP_KERNEL);
	if (!cls)
		return NULL;
	cls->kobj = kobject_create_and_add(name, &class_kset->kobj);
	if (!cls->kobj) {
		kfree(cls);
		return NULL;
	}
	return cls;
}
EXPORT_SYMBOL_GPL(class_compat_register);

/**
 * class_compat_unregister - unregister a compatibility class
 * @cls: the class to unregister
 */
void class_compat_unregister(struct class_compat *cls)
{
	kobject_put(cls->kobj);
	kfree(cls);
}
EXPORT_SYMBOL_GPL(class_compat_unregister);

/**
 * class_compat_create_link - create a compatibility class device link to
 *			      a bus device
 * @cls: the compatibility class
 * @dev: the target bus device
 * @device_link: an optional device to which a "device" link should be created
 */
int class_compat_create_link(struct class_compat *cls, struct device *dev,
			     struct device *device_link)
{
	int error;

	error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
	if (error)
		return error;

	/*
	 * Optionally add a "device" link (typically to the parent), as a
	 * class device would have one and we want to provide as much
	 * backwards compatibility as possible.
	 */
	if (device_link) {
		error = sysfs_create_link(&dev->kobj, &device_link->kobj,
					  "device");
		if (error)
			sysfs_remove_link(cls->kobj, dev_name(dev));
	}

	return error;
}
EXPORT_SYMBOL_GPL(class_compat_create_link);

/**
 * class_compat_remove_link - remove a compatibility class device link to
 *			      a bus device
 * @cls: the compatibility class
 * @dev: the target bus device
 * @device_link: an optional device to which a "device" link was previously
 *		 created
 */
void class_compat_remove_link(struct class_compat *cls, struct device *dev,
			      struct device *device_link)
{
	if (device_link)
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(cls->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_remove_link);

int __init classes_init(void)
{
	class_kset = kset_create_and_add("class", NULL, NULL);
	if (!class_kset)
		return -ENOMEM;
	return 0;
}
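/*
 * Example (illustrative sketch, not part of this file): a class interface
 * using class_interface_register()/class_interface_unregister() above to
 * be notified about every device in a class. The "foo" names are
 * hypothetical; the exact callback prototypes are defined by
 * struct class_interface in <linux/device/class.h>, and the two-argument
 * form shown here matches how this file invokes them.
 *
 *	static int foo_add_dev(struct device *dev,
 *			       struct class_interface *intf)
 *	{
 *		dev_info(dev, "added to foo class\n");
 *		return 0;
 *	}
 *
 *	static void foo_remove_dev(struct device *dev,
 *				   struct class_interface *intf)
 *	{
 *		dev_info(dev, "removed from foo class\n");
 *	}
 *
 *	static struct class_interface foo_interface = {
 *		.class		= &foo_class,
 *		.add_dev	= foo_add_dev,
 *		.remove_dev	= foo_remove_dev,
 *	};
 *
 * class_interface_register(&foo_interface) walks the devices already in
 * the class and calls add_dev() for each of them;
 * class_interface_unregister(&foo_interface) calls remove_dev() for each
 * device and drops the reference taken at registration time.
 */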