xref: /linux/fs/char_dev.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/char_dev.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  */
7 
8 #include <linux/init.h>
9 #include <linux/fs.h>
10 #include <linux/kdev_t.h>
11 #include <linux/slab.h>
12 #include <linux/string.h>
13 #include <linux/cleanup.h>
14 
15 #include <linux/major.h>
16 #include <linux/errno.h>
17 #include <linux/module.h>
18 #include <linux/seq_file.h>
19 
20 #include <linux/kobject.h>
21 #include <linux/kobj_map.h>
22 #include <linux/cdev.h>
23 #include <linux/mutex.h>
24 #include <linux/backing-dev.h>
25 #include <linux/tty.h>
26 
27 #include "internal.h"
28 
29 static struct kobj_map *cdev_map __ro_after_init;
30 
31 static DEFINE_MUTEX(chrdevs_lock);
32 
33 #define CHRDEV_MAJOR_HASH_SIZE 255
34 
35 static struct char_device_struct {
36 	struct char_device_struct *next;
37 	unsigned int major;
38 	unsigned int baseminor;
39 	int minorct;
40 	char name[64];
41 	struct cdev *cdev;		/* will die */
42 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
43 
44 /* index in the above */
45 static inline int major_to_index(unsigned major)
46 {
47 	return major % CHRDEV_MAJOR_HASH_SIZE;
48 }
49 
50 #ifdef CONFIG_PROC_FS
51 
52 void chrdev_show(struct seq_file *f, off_t offset)
53 {
54 	struct char_device_struct *cd;
55 
56 	mutex_lock(&chrdevs_lock);
57 	for (cd = chrdevs[major_to_index(offset)]; cd; cd = cd->next) {
58 		if (cd->major == offset)
59 			seq_printf(f, "%3d %s\n", cd->major, cd->name);
60 	}
61 	mutex_unlock(&chrdevs_lock);
62 }
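
/*
 * Output sketch (illustrative only): chrdev_show() produces the character
 * half of /proc/devices, one "major name" pair per registered entry; the
 * surrounding /proc code supplies the section header.  On a typical system
 * the result looks roughly like:
 *
 *	Character devices:
 *	  1 mem
 *	  4 tty
 *	  5 /dev/tty
 *	 10 misc
 */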
63 
64 #endif /* CONFIG_PROC_FS */
65 
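/*
 * Pick an unused major for dynamic allocation: first scan the hash table
 * downwards from the top of the classic dynamic range for a completely
 * empty bucket, then fall back to the extended dynamic range, whose majors
 * share buckets with lower ones and therefore need an explicit walk of the
 * collision chain.  Returns the chosen major, or -EBUSY if both ranges are
 * exhausted.
 */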
66 static int find_dynamic_major(void)
67 {
68 	int i;
69 	struct char_device_struct *cd;
70 
71 	for (i = ARRAY_SIZE(chrdevs)-1; i >= CHRDEV_MAJOR_DYN_END; i--) {
72 		if (chrdevs[i] == NULL)
73 			return i;
74 	}
75 
76 	for (i = CHRDEV_MAJOR_DYN_EXT_START;
77 	     i >= CHRDEV_MAJOR_DYN_EXT_END; i--) {
78 		for (cd = chrdevs[major_to_index(i)]; cd; cd = cd->next)
79 			if (cd->major == i)
80 				break;
81 
82 		if (cd == NULL)
83 			return i;
84 	}
85 
86 	return -EBUSY;
87 }
88 
89 /*
90  * Register a single major with a specified minor range.
91  *
92  * If major == 0 this function will dynamically allocate an unused major.
93  * If major > 0 this function will attempt to reserve the range of minors
94  * with the given major.
95  *
96  */
97 static struct char_device_struct *
98 __register_chrdev_region(unsigned int major, unsigned int baseminor,
99 			   int minorct, const char *name)
100 {
101 	struct char_device_struct *cd __free(kfree) = NULL;
102 	struct char_device_struct *curr, *prev = NULL;
103 	int ret;
104 	int i;
105 
106 	if (major >= CHRDEV_MAJOR_MAX) {
107 		pr_err("CHRDEV \"%s\" major requested (%u) is greater than the maximum (%u)\n",
108 		       name, major, CHRDEV_MAJOR_MAX-1);
109 		return ERR_PTR(-EINVAL);
110 	}
111 
112 	if (minorct > MINORMASK + 1 - baseminor) {
113 		pr_err("CHRDEV \"%s\" minor range requested (%u-%u) is out of range of maximum range (%u-%u) for a single major\n",
114 			name, baseminor, baseminor + minorct - 1, 0, MINORMASK);
115 		return ERR_PTR(-EINVAL);
116 	}
117 
118 	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
119 	if (cd == NULL)
120 		return ERR_PTR(-ENOMEM);
121 
122 	guard(mutex)(&chrdevs_lock);
123 
124 	if (major == 0) {
125 		ret = find_dynamic_major();
126 		if (ret < 0) {
127 			pr_err("CHRDEV \"%s\" dynamic allocation region is full\n",
128 			       name);
129 			return ERR_PTR(ret);
130 		}
131 		major = ret;
132 	}
133 
134 	ret = -EBUSY;
135 	i = major_to_index(major);
136 	for (curr = chrdevs[i]; curr; prev = curr, curr = curr->next) {
137 		if (curr->major < major)
138 			continue;
139 
140 		if (curr->major > major)
141 			break;
142 
143 		if (curr->baseminor + curr->minorct <= baseminor)
144 			continue;
145 
146 		if (curr->baseminor >= baseminor + minorct)
147 			break;
148 
149 		return ERR_PTR(ret);
150 	}
151 
152 	cd->major = major;
153 	cd->baseminor = baseminor;
154 	cd->minorct = minorct;
155 	strscpy(cd->name, name, sizeof(cd->name));
156 
157 	if (!prev) {
158 		cd->next = curr;
159 		chrdevs[i] = cd;
160 	} else {
161 		cd->next = prev->next;
162 		prev->next = cd;
163 	}
164 
165 	return_ptr(cd);
166 }
167 
168 static struct char_device_struct *
169 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
170 {
171 	struct char_device_struct *cd = NULL, **cp;
172 	int i = major_to_index(major);
173 
174 	mutex_lock(&chrdevs_lock);
175 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
176 		if ((*cp)->major == major &&
177 		    (*cp)->baseminor == baseminor &&
178 		    (*cp)->minorct == minorct)
179 			break;
180 	if (*cp) {
181 		cd = *cp;
182 		*cp = cd->next;
183 	}
184 	mutex_unlock(&chrdevs_lock);
185 	return cd;
186 }
187 
188 /**
189  * register_chrdev_region() - register a range of device numbers
190  * @from: the first in the desired range of device numbers; must include
191  *        the major number.
192  * @count: the number of consecutive device numbers required
193  * @name: the name of the device or driver.
194  *
195  * Return value is zero on success, a negative error code on failure.
196  */
197 int register_chrdev_region(dev_t from, unsigned count, const char *name)
198 {
199 	struct char_device_struct *cd;
200 	dev_t to = from + count;
201 	dev_t n, next;
202 
203 	for (n = from; n < to; n = next) {
204 		next = MKDEV(MAJOR(n)+1, 0);
205 		if (next > to)
206 			next = to;
207 		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
208 			       next - n, name);
209 		if (IS_ERR(cd))
210 			goto fail;
211 	}
212 	return 0;
213 fail:
214 	to = n;
215 	for (n = from; n < to; n = next) {
216 		next = MKDEV(MAJOR(n)+1, 0);
217 		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
218 	}
219 	return PTR_ERR(cd);
220 }
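
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * "foo" driver claiming four minors under a statically assigned major.
 * The value 240 (from the local/experimental major range), the name and
 * the foo_init()/foo_exit() functions are assumptions for the example.
 *
 *	static int __init foo_init(void)
 *	{
 *		return register_chrdev_region(MKDEV(240, 0), 4, "foo");
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_chrdev_region(MKDEV(240, 0), 4);
 *	}
 */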
221 
222 /**
223  * alloc_chrdev_region() - register a range of char device numbers
224  * @dev: output parameter for first assigned number
225  * @baseminor: first of the requested range of minor numbers
226  * @count: the number of minor numbers required
227  * @name: the name of the associated device or driver
228  *
229  * Allocates a range of char device numbers.  The major number will be
230  * chosen dynamically, and returned (along with the first minor number)
231  * in @dev.  Returns zero or a negative error code.
232  */
233 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
234 			const char *name)
235 {
236 	struct char_device_struct *cd;
237 	cd = __register_chrdev_region(0, baseminor, count, name);
238 	if (IS_ERR(cd))
239 		return PTR_ERR(cd);
240 	*dev = MKDEV(cd->major, cd->baseminor);
241 	return 0;
242 }
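
/*
 * Usage sketch (illustrative only): letting the kernel pick the major.
 * The name "foo" and the foo_devt variable are assumptions for the
 * example; the first allocated number comes back in foo_devt.
 *
 *	static dev_t foo_devt;
 *
 *	static int __init foo_init(void)
 *	{
 *		int err = alloc_chrdev_region(&foo_devt, 0, 4, "foo");
 *
 *		if (err)
 *			return err;
 *		pr_info("foo: using major %d\n", MAJOR(foo_devt));
 *		return 0;
 *	}
 */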
243 
244 /**
245  * __register_chrdev() - create and register a cdev occupying a range of minors
246  * @major: major device number or 0 for dynamic allocation
247  * @baseminor: first of the requested range of minor numbers
248  * @count: the number of minor numbers required
249  * @name: name of this range of devices
250  * @fops: file operations associated with these devices
251  *
252  * If @major == 0 this function will dynamically allocate a major and return
253  * its number.
254  *
255  * If @major > 0 this function will attempt to reserve a device with the given
256  * major number and will return zero on success.
257  *
258  * Returns a negative errno on failure.
259  *
260  * The name of this device has nothing to do with the name of the device in
261  * /dev. It only helps to keep track of the different owners of devices. If
262  * your module has only one type of device it's OK to use e.g. the name
263  * of the module here.
264  */
265 int __register_chrdev(unsigned int major, unsigned int baseminor,
266 		      unsigned int count, const char *name,
267 		      const struct file_operations *fops)
268 {
269 	struct char_device_struct *cd;
270 	struct cdev *cdev;
271 	int err = -ENOMEM;
272 
273 	cd = __register_chrdev_region(major, baseminor, count, name);
274 	if (IS_ERR(cd))
275 		return PTR_ERR(cd);
276 
277 	cdev = cdev_alloc();
278 	if (!cdev)
279 		goto out2;
280 
281 	cdev->owner = fops->owner;
282 	cdev->ops = fops;
283 	kobject_set_name(&cdev->kobj, "%s", name);
284 
285 	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
286 	if (err)
287 		goto out;
288 
289 	cd->cdev = cdev;
290 
291 	return major ? 0 : cd->major;
292 out:
293 	kobject_put(&cdev->kobj);
294 out2:
295 	kfree(__unregister_chrdev_region(cd->major, baseminor, count));
296 	return err;
297 }
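
/*
 * Usage sketch (illustrative only): this function is normally reached via
 * the register_chrdev()/unregister_chrdev() wrappers in <linux/fs.h>,
 * which cover minors 0-255 of a single major.  The name "foo" and the
 * foo_fops structure are assumptions for the example; passing 0 requests
 * a dynamically allocated major.
 *
 *	static int foo_major;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_major = register_chrdev(0, "foo", &foo_fops);
 *		if (foo_major < 0)
 *			return foo_major;
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_chrdev(foo_major, "foo");
 *	}
 */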
298 
299 /**
300  * unregister_chrdev_region() - unregister a range of device numbers
301  * @from: the first in the range of numbers to unregister
302  * @count: the number of device numbers to unregister
303  *
304  * This function will unregister a range of @count device numbers,
305  * starting with @from.  The caller should normally be the one who
306  * allocated those numbers in the first place...
307  */
308 void unregister_chrdev_region(dev_t from, unsigned count)
309 {
310 	dev_t to = from + count;
311 	dev_t n, next;
312 
313 	for (n = from; n < to; n = next) {
314 		next = MKDEV(MAJOR(n)+1, 0);
315 		if (next > to)
316 			next = to;
317 		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
318 	}
319 }
320 
321 /**
322  * __unregister_chrdev() - unregister and destroy a cdev
323  * @major: major device number
324  * @baseminor: first of the range of minor numbers
325  * @count: the number of minor numbers this cdev is occupying
326  * @name: name of this range of devices
327  *
328  * Unregister and destroy the cdev occupying the region described by
329  * @major, @baseminor and @count.  This function undoes what
330  * __register_chrdev() did.
331  */
332 void __unregister_chrdev(unsigned int major, unsigned int baseminor,
333 			 unsigned int count, const char *name)
334 {
335 	struct char_device_struct *cd;
336 
337 	cd = __unregister_chrdev_region(major, baseminor, count);
338 	if (cd && cd->cdev)
339 		cdev_del(cd->cdev);
340 	kfree(cd);
341 }
342 
343 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(cdev_lock);
344 
345 static struct kobject *cdev_get(struct cdev *p)
346 {
347 	struct module *owner = p->owner;
348 	struct kobject *kobj;
349 
350 	if (!try_module_get(owner))
351 		return NULL;
352 	kobj = kobject_get_unless_zero(&p->kobj);
353 	if (!kobj)
354 		module_put(owner);
355 	return kobj;
356 }
357 
358 void cdev_put(struct cdev *p)
359 {
360 	if (p) {
361 		struct module *owner = p->owner;
362 		kobject_put(&p->kobj);
363 		module_put(owner);
364 	}
365 }
366 
367 /*
368  * Called every time a character special file is opened
369  */
370 static int chrdev_open(struct inode *inode, struct file *filp)
371 {
372 	const struct file_operations *fops;
373 	struct cdev *p;
374 	struct cdev *new = NULL;
375 	int ret = 0;
376 
377 	spin_lock(&cdev_lock);
378 	p = inode->i_cdev;
379 	if (!p) {
380 		struct kobject *kobj;
381 		int idx;
382 		spin_unlock(&cdev_lock);
383 		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
384 		if (!kobj)
385 			return -ENXIO;
386 		new = container_of(kobj, struct cdev, kobj);
387 		spin_lock(&cdev_lock);
388 		/* Check i_cdev again in case somebody beat us to it while
389 		   we dropped the lock. */
390 		p = inode->i_cdev;
391 		if (!p) {
392 			inode->i_cdev = p = new;
393 			list_add(&inode->i_devices, &p->list);
394 			new = NULL;
395 		} else if (!cdev_get(p))
396 			ret = -ENXIO;
397 	} else if (!cdev_get(p))
398 		ret = -ENXIO;
399 	spin_unlock(&cdev_lock);
400 	cdev_put(new);
401 	if (ret)
402 		return ret;
403 
404 	ret = -ENXIO;
405 	fops = fops_get(p->ops);
406 	if (!fops)
407 		goto out_cdev_put;
408 
409 	replace_fops(filp, fops);
410 	if (filp->f_op->open) {
411 		ret = filp->f_op->open(inode, filp);
412 		if (ret)
413 			goto out_cdev_put;
414 	}
415 
416 	return 0;
417 
418  out_cdev_put:
419 	cdev_put(p);
420 	return ret;
421 }
422 
423 void cd_forget(struct inode *inode)
424 {
425 	spin_lock(&cdev_lock);
426 	list_del_init(&inode->i_devices);
427 	inode->i_cdev = NULL;
428 	inode->i_mapping = &inode->i_data;
429 	spin_unlock(&cdev_lock);
430 }
431 
432 static void cdev_purge(struct cdev *cdev)
433 {
434 	spin_lock(&cdev_lock);
435 	while (!list_empty(&cdev->list)) {
436 		struct inode *inode;
437 		inode = container_of(cdev->list.next, struct inode, i_devices);
438 		list_del_init(&inode->i_devices);
439 		inode->i_cdev = NULL;
440 	}
441 	spin_unlock(&cdev_lock);
442 }
443 
444 /*
445  * Dummy default file-operations: the only thing this does
446  * is contain the open that then fills in the correct operations
447  * depending on the special file...
448  */
449 const struct file_operations def_chr_fops = {
450 	.open = chrdev_open,
451 	.llseek = noop_llseek,
452 };
453 
454 static struct kobject *exact_match(dev_t dev, int *part, void *data)
455 {
456 	struct cdev *p = data;
457 	return &p->kobj;
458 }
459 
460 static int exact_lock(dev_t dev, void *data)
461 {
462 	struct cdev *p = data;
463 	return cdev_get(p) ? 0 : -1;
464 }
465 
466 /**
467  * cdev_add() - add a char device to the system
468  * @p: the cdev structure for the device
469  * @dev: the first device number for which this device is responsible
470  * @count: the number of consecutive minor numbers corresponding to this
471  *         device
472  *
473  * cdev_add() adds the device represented by @p to the system, making it
474  * live immediately.  A negative error code is returned on failure.
475  */
476 int cdev_add(struct cdev *p, dev_t dev, unsigned count)
477 {
478 	int error;
479 
480 	p->dev = dev;
481 	p->count = count;
482 
483 	if (WARN_ON(dev == WHITEOUT_DEV)) {
484 		error = -EBUSY;
485 		goto err;
486 	}
487 
488 	error = kobj_map(cdev_map, dev, count, NULL,
489 			 exact_match, exact_lock, p);
490 	if (error)
491 		goto err;
492 
493 	kobject_get(p->kobj.parent);
494 
495 	return 0;
496 
497 err:
498 	kfree_const(p->kobj.name);
499 	p->kobj.name = NULL;
500 	return error;
501 }
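
/*
 * Usage sketch (illustrative only): the standalone pattern pairs
 * cdev_alloc() with cdev_add() once a device number region has been
 * reserved.  foo_devt and foo_fops are assumptions for the example.
 *
 *	struct cdev *foo_cdev = cdev_alloc();
 *
 *	if (!foo_cdev)
 *		return -ENOMEM;
 *	foo_cdev->owner = THIS_MODULE;
 *	foo_cdev->ops = &foo_fops;
 *	err = cdev_add(foo_cdev, foo_devt, 1);
 *	if (err) {
 *		kobject_put(&foo_cdev->kobj);
 *		return err;
 *	}
 */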
502 
503 /**
504  * cdev_set_parent() - set the parent kobject for a char device
505  * @p: the cdev structure
506  * @kobj: the kobject to take a reference to
507  *
508  * cdev_set_parent() sets a parent kobject which will be referenced
509  * appropriately so the parent is not freed before the cdev. This
510  * should be called before cdev_add.
511  */
512 void cdev_set_parent(struct cdev *p, struct kobject *kobj)
513 {
514 	WARN_ON(!kobj->state_initialized);
515 	p->kobj.parent = kobj;
516 }
517 
518 /**
519  * cdev_device_add() - add a char device and its corresponding
520  *	struct device, linking them together
521  * @dev: the device structure
522  * @cdev: the cdev structure
523  *
524  * cdev_device_add() adds the char device represented by @cdev to the system,
525  * just as cdev_add() does. It then adds @dev to the system using device_add().
526  * The dev_t for the char device will be taken from the struct device which
527  * needs to be initialized first. This helper function correctly takes a
528  * reference to the parent device so the parent will not get released until
529  * all references to the cdev are released.
530  *
531  * This helper uses dev->devt for the device number. If it is not set
532  * it will not add the cdev and it will be equivalent to device_add.
533  *
534  * This function should be used whenever the struct cdev and the
535  * struct device are members of the same structure whose lifetime is
536  * managed by the struct device.
537  *
538  * NOTE: Callers must assume that userspace was able to open the cdev and
539  * can call cdev fops callbacks at any time, even if this function fails.
540  */
541 int cdev_device_add(struct cdev *cdev, struct device *dev)
542 {
543 	int rc = 0;
544 
545 	if (dev->devt) {
546 		cdev_set_parent(cdev, &dev->kobj);
547 
548 		rc = cdev_add(cdev, dev->devt, 1);
549 		if (rc)
550 			return rc;
551 	}
552 
553 	rc = device_add(dev);
554 	if (rc && dev->devt)
555 		cdev_del(cdev);
556 
557 	return rc;
558 }
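
/*
 * Usage sketch (illustrative only): the intended pattern embeds the cdev
 * and the struct device in one driver-private object whose lifetime the
 * struct device manages.  struct foo, foo_devt, foo_fops and foo_release()
 * are assumptions for the example; the device must be initialized and
 * named, with devt set, before the call.
 *
 *	struct foo {
 *		struct device dev;
 *		struct cdev cdev;
 *	};
 *
 *	device_initialize(&foo->dev);
 *	foo->dev.devt = foo_devt;
 *	foo->dev.release = foo_release;
 *	dev_set_name(&foo->dev, "foo");
 *	cdev_init(&foo->cdev, &foo_fops);
 *	foo->cdev.owner = THIS_MODULE;
 *	err = cdev_device_add(&foo->cdev, &foo->dev);
 *	if (err)
 *		put_device(&foo->dev);
 */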
559 
560 /**
561  * cdev_device_del() - inverse of cdev_device_add
562  * @cdev: the cdev structure
563  * @dev: the device structure
564  *
565  * cdev_device_del() is a helper function to call cdev_del and device_del.
566  * It should be used whenever cdev_device_add is used.
567  *
568  * If dev->devt is not set it will not remove the cdev and will be equivalent
569  * to device_del.
570  *
571  * NOTE: This guarantees that associated sysfs callbacks are not running
572  * or runnable; however, any cdevs already open will remain and their fops
573  * will still be callable even after this function returns.
574  */
575 void cdev_device_del(struct cdev *cdev, struct device *dev)
576 {
577 	device_del(dev);
578 	if (dev->devt)
579 		cdev_del(cdev);
580 }
581 
582 static void cdev_unmap(dev_t dev, unsigned count)
583 {
584 	kobj_unmap(cdev_map, dev, count);
585 }
586 
587 /**
588  * cdev_del() - remove a cdev from the system
589  * @p: the cdev structure to be removed
590  *
591  * cdev_del() removes @p from the system, possibly freeing the structure
592  * itself.
593  *
594  * NOTE: This guarantees that the cdev device will no longer be able to be
595  * opened; however, any cdevs already open will remain and their fops will
596  * still be callable even after cdev_del returns.
597  */
598 void cdev_del(struct cdev *p)
599 {
600 	cdev_unmap(p->dev, p->count);
601 	kobject_put(&p->kobj);
602 }
603 
604 
605 static void cdev_default_release(struct kobject *kobj)
606 {
607 	struct cdev *p = container_of(kobj, struct cdev, kobj);
608 	struct kobject *parent = kobj->parent;
609 
610 	cdev_purge(p);
611 	kobject_put(parent);
612 }
613 
614 static void cdev_dynamic_release(struct kobject *kobj)
615 {
616 	struct cdev *p = container_of(kobj, struct cdev, kobj);
617 	struct kobject *parent = kobj->parent;
618 
619 	cdev_purge(p);
620 	kfree(p);
621 	kobject_put(parent);
622 }
623 
624 static struct kobj_type ktype_cdev_default = {
625 	.release	= cdev_default_release,
626 };
627 
628 static struct kobj_type ktype_cdev_dynamic = {
629 	.release	= cdev_dynamic_release,
630 };
631 
632 /**
633  * cdev_alloc() - allocate a cdev structure
634  *
635  * Allocates and returns a cdev structure, or NULL on failure.
636  */
637 struct cdev *cdev_alloc(void)
638 {
639 	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
640 	if (p) {
641 		INIT_LIST_HEAD(&p->list);
642 		kobject_init(&p->kobj, &ktype_cdev_dynamic);
643 	}
644 	return p;
645 }
646 
647 /**
648  * cdev_init() - initialize a cdev structure
649  * @cdev: the structure to initialize
650  * @fops: the file_operations for this device
651  *
652  * Initializes @cdev, remembering @fops, making it ready to add to the
653  * system with cdev_add().
654  */
655 void cdev_init(struct cdev *cdev, const struct file_operations *fops)
656 {
657 	memset(cdev, 0, sizeof *cdev);
658 	INIT_LIST_HEAD(&cdev->list);
659 	kobject_init(&cdev->kobj, &ktype_cdev_default);
660 	cdev->ops = fops;
661 }
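
/*
 * Usage sketch (illustrative only): cdev_init() is meant for a cdev that
 * is embedded in a larger, caller-managed object, in contrast to the
 * standalone cdevs handed out by cdev_alloc().  foo_cdev, foo_fops and
 * foo_devt are assumptions for the example.
 *
 *	static struct cdev foo_cdev;
 *
 *	cdev_init(&foo_cdev, &foo_fops);
 *	foo_cdev.owner = THIS_MODULE;
 *	err = cdev_add(&foo_cdev, foo_devt, 1);
 *	if (err)
 *		return err;
 */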
662 
663 static struct kobject *base_probe(dev_t dev, int *part, void *data)
664 {
665 	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
666 		/* Make old-style 2.4 aliases work */
667 		request_module("char-major-%d", MAJOR(dev));
668 	return NULL;
669 }
670 
671 void __init chrdev_init(void)
672 {
673 	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
674 }
675 
676 
677 /* Let modules do char dev stuff */
678 EXPORT_SYMBOL(register_chrdev_region);
679 EXPORT_SYMBOL(unregister_chrdev_region);
680 EXPORT_SYMBOL(alloc_chrdev_region);
681 EXPORT_SYMBOL(cdev_init);
682 EXPORT_SYMBOL(cdev_alloc);
683 EXPORT_SYMBOL(cdev_del);
684 EXPORT_SYMBOL(cdev_add);
685 EXPORT_SYMBOL(cdev_set_parent);
686 EXPORT_SYMBOL(cdev_device_add);
687 EXPORT_SYMBOL(cdev_device_del);
688 EXPORT_SYMBOL(__register_chrdev);
689 EXPORT_SYMBOL(__unregister_chrdev);
690