xref: /linux/fs/char_dev.c (revision 4949009eb8d40a441dcddcd96e101e77d31cf1b2)
1 /*
2  *  linux/fs/char_dev.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6 
7 #include <linux/init.h>
8 #include <linux/fs.h>
9 #include <linux/kdev_t.h>
10 #include <linux/slab.h>
11 #include <linux/string.h>
12 
13 #include <linux/major.h>
14 #include <linux/errno.h>
15 #include <linux/module.h>
16 #include <linux/seq_file.h>
17 
18 #include <linux/kobject.h>
19 #include <linux/kobj_map.h>
20 #include <linux/cdev.h>
21 #include <linux/mutex.h>
22 #include <linux/backing-dev.h>
23 #include <linux/tty.h>
24 
25 #include "internal.h"
26 
27 /*
28  * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
29  * devices
30  * - permits shared-mmap for read, write and/or exec
31  * - does not permit private mmap in NOMMU mode (can't do COW)
32  * - no readahead or I/O queue unplugging required
33  */
34 struct backing_dev_info directly_mappable_cdev_bdi = {
35 	.name = "char",
36 	.capabilities	= (
37 #ifdef CONFIG_MMU
38 		/* permit private copies of the data to be taken */
39 		BDI_CAP_MAP_COPY |
40 #endif
41 		/* permit direct mmap, for read, write or exec */
42 		BDI_CAP_MAP_DIRECT |
43 		BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
44 		/* no writeback happens */
45 		BDI_CAP_NO_ACCT_AND_WRITEBACK),
46 };
47 
48 static struct kobj_map *cdev_map;
49 
50 static DEFINE_MUTEX(chrdevs_lock);
51 
52 static struct char_device_struct {
53 	struct char_device_struct *next;
54 	unsigned int major;
55 	unsigned int baseminor;
56 	int minorct;
57 	char name[64];
58 	struct cdev *cdev;		/* will die */
59 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
60 
61 /* index in the above */
62 static inline int major_to_index(unsigned major)
63 {
64 	return major % CHRDEV_MAJOR_HASH_SIZE;
65 }
66 
67 #ifdef CONFIG_PROC_FS
68 
69 void chrdev_show(struct seq_file *f, off_t offset)
70 {
71 	struct char_device_struct *cd;
72 
73 	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
74 		mutex_lock(&chrdevs_lock);
75 		for (cd = chrdevs[offset]; cd; cd = cd->next)
76 			seq_printf(f, "%3d %s\n", cd->major, cd->name);
77 		mutex_unlock(&chrdevs_lock);
78 	}
79 }
80 
81 #endif /* CONFIG_PROC_FS */
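/*
 * The seq_file handler in fs/proc/devices.c prints the "Character devices:"
 * header and then calls chrdev_show() once per hash slot, so every registered
 * major shows up as one "%3d %s" line.  Illustrative output only (the actual
 * entries depend on what the running system has registered):
 *
 *	Character devices:
 *	  1 mem
 *	  5 /dev/tty
 *	128 ptm
 */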
82 
83 /*
84  * Register a single major with a specified minor range.
85  *
86  * If major == 0 this functions will dynamically allocate a major and return
87  * its number.
88  *
89  * If major > 0 this function will attempt to reserve the passed range of
90  * minors and will return zero on success.
91  *
92  * Returns a -ve errno on failure.
93  */
94 static struct char_device_struct *
95 __register_chrdev_region(unsigned int major, unsigned int baseminor,
96 			   int minorct, const char *name)
97 {
98 	struct char_device_struct *cd, **cp;
99 	int ret = 0;
100 	int i;
101 
102 	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
103 	if (cd == NULL)
104 		return ERR_PTR(-ENOMEM);
105 
106 	mutex_lock(&chrdevs_lock);
107 
108 	/* temporary */
109 	if (major == 0) {
110 		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
111 			if (chrdevs[i] == NULL)
112 				break;
113 		}
114 
115 		if (i == 0) {
116 			ret = -EBUSY;
117 			goto out;
118 		}
119 		major = i;
120 	}
121 
122 	cd->major = major;
123 	cd->baseminor = baseminor;
124 	cd->minorct = minorct;
125 	strlcpy(cd->name, name, sizeof(cd->name));
126 
127 	i = major_to_index(major);
128 
129 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
130 		if ((*cp)->major > major ||
131 		    ((*cp)->major == major &&
132 		     (((*cp)->baseminor >= baseminor) ||
133 		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
134 			break;
135 
136 	/* Check for overlapping minor ranges.  */
137 	if (*cp && (*cp)->major == major) {
138 		int old_min = (*cp)->baseminor;
139 		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
140 		int new_min = baseminor;
141 		int new_max = baseminor + minorct - 1;
142 
143 		/* New driver overlaps from the left.  */
144 		if (new_max >= old_min && new_max <= old_max) {
145 			ret = -EBUSY;
146 			goto out;
147 		}
148 
149 		/* New driver overlaps from the right.  */
150 		if (new_min <= old_max && new_min >= old_min) {
151 			ret = -EBUSY;
152 			goto out;
153 		}
154 	}
155 
156 	cd->next = *cp;
157 	*cp = cd;
158 	mutex_unlock(&chrdevs_lock);
159 	return cd;
160 out:
161 	mutex_unlock(&chrdevs_lock);
162 	kfree(cd);
163 	return ERR_PTR(ret);
164 }
165 
166 static struct char_device_struct *
167 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
168 {
169 	struct char_device_struct *cd = NULL, **cp;
170 	int i = major_to_index(major);
171 
172 	mutex_lock(&chrdevs_lock);
173 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
174 		if ((*cp)->major == major &&
175 		    (*cp)->baseminor == baseminor &&
176 		    (*cp)->minorct == minorct)
177 			break;
178 	if (*cp) {
179 		cd = *cp;
180 		*cp = cd->next;
181 	}
182 	mutex_unlock(&chrdevs_lock);
183 	return cd;
184 }
185 
186 /**
187  * register_chrdev_region() - register a range of device numbers
188  * @from: the first in the desired range of device numbers; must include
189  *        the major number.
190  * @count: the number of consecutive device numbers required
191  * @name: the name of the device or driver.
192  *
193  * Return value is zero on success, a negative error code on failure.
194  */
195 int register_chrdev_region(dev_t from, unsigned count, const char *name)
196 {
197 	struct char_device_struct *cd;
198 	dev_t to = from + count;
199 	dev_t n, next;
200 
201 	for (n = from; n < to; n = next) {
202 		next = MKDEV(MAJOR(n)+1, 0);
203 		if (next > to)
204 			next = to;
205 		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
206 			       next - n, name);
207 		if (IS_ERR(cd))
208 			goto fail;
209 	}
210 	return 0;
211 fail:
212 	to = n;
213 	for (n = from; n < to; n = next) {
214 		next = MKDEV(MAJOR(n)+1, 0);
215 		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
216 	}
217 	return PTR_ERR(cd);
218 }
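/*
 * Example of claiming a fixed major (a minimal sketch; FOO_MAJOR, FOO_MINORS
 * and the "foo" name are hypothetical and belong to the calling driver):
 *
 *	err = register_chrdev_region(MKDEV(FOO_MAJOR, 0), FOO_MINORS, "foo");
 *	if (err)
 *		return err;
 *	...
 *	unregister_chrdev_region(MKDEV(FOO_MAJOR, 0), FOO_MINORS);
 *
 * Fixed majors should come from the assignments in Documentation/devices.txt;
 * drivers without an assigned number normally use alloc_chrdev_region()
 * below instead.
 */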
219 
220 /**
221  * alloc_chrdev_region() - register a range of char device numbers
222  * @dev: output parameter for first assigned number
223  * @baseminor: first of the requested range of minor numbers
224  * @count: the number of minor numbers required
225  * @name: the name of the associated device or driver
226  *
227  * Allocates a range of char device numbers.  The major number will be
228  * chosen dynamically, and returned (along with the first minor number)
229  * in @dev.  Returns zero or a negative error code.
230  */
231 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
232 			const char *name)
233 {
234 	struct char_device_struct *cd;
235 	cd = __register_chrdev_region(0, baseminor, count, name);
236 	if (IS_ERR(cd))
237 		return PTR_ERR(cd);
238 	*dev = MKDEV(cd->major, cd->baseminor);
239 	return 0;
240 }
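/*
 * Example of dynamic allocation (a minimal sketch; the foo_* identifiers are
 * hypothetical):
 *
 *	static dev_t foo_devt;
 *
 *	err = alloc_chrdev_region(&foo_devt, 0, FOO_MINORS, "foo");
 *	if (err)
 *		return err;
 *	pr_info("foo: got major %d\n", MAJOR(foo_devt));
 *	...
 *	unregister_chrdev_region(foo_devt, FOO_MINORS);
 */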
241 
242 /**
243  * __register_chrdev() - create and register a cdev occupying a range of minors
244  * @major: major device number or 0 for dynamic allocation
245  * @baseminor: first of the requested range of minor numbers
246  * @count: the number of minor numbers required
247  * @name: name of this range of devices
248  * @fops: file operations associated with these devices
249  *
250  * If @major == 0 this function will dynamically allocate a major and return
251  * its number.
252  *
253  * If @major > 0 this function will attempt to reserve a device with the given
254  * major number and will return zero on success.
255  *
256  * Returns a -ve errno on failure.
257  *
258  * The name of this device has nothing to do with the name of the device in
259  * /dev. It only helps to keep track of the different owners of devices. If
260  * your module has only one type of device it's ok to use e.g. the name
261  * of the module here.
262  */
263 int __register_chrdev(unsigned int major, unsigned int baseminor,
264 		      unsigned int count, const char *name,
265 		      const struct file_operations *fops)
266 {
267 	struct char_device_struct *cd;
268 	struct cdev *cdev;
269 	int err = -ENOMEM;
270 
271 	cd = __register_chrdev_region(major, baseminor, count, name);
272 	if (IS_ERR(cd))
273 		return PTR_ERR(cd);
274 
275 	cdev = cdev_alloc();
276 	if (!cdev)
277 		goto out2;
278 
279 	cdev->owner = fops->owner;
280 	cdev->ops = fops;
281 	kobject_set_name(&cdev->kobj, "%s", name);
282 
283 	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
284 	if (err)
285 		goto out;
286 
287 	cd->cdev = cdev;
288 
289 	return major ? 0 : cd->major;
290 out:
291 	kobject_put(&cdev->kobj);
292 out2:
293 	kfree(__unregister_chrdev_region(cd->major, baseminor, count));
294 	return err;
295 }
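/*
 * Most callers reach this through the register_chrdev()/unregister_chrdev()
 * static inline wrappers in <linux/fs.h>, which pass a baseminor of 0 and a
 * count of 256.  A typical dynamic-major user (foo_fops is hypothetical):
 *
 *	major = register_chrdev(0, "foo", &foo_fops);
 *	if (major < 0)
 *		return major;
 *	...
 *	unregister_chrdev(major, "foo");
 */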
296 
297 /**
298  * unregister_chrdev_region() - return a range of device numbers
299  * @from: the first in the range of numbers to unregister
300  * @count: the number of device numbers to unregister
301  *
302  * This function will unregister a range of @count device numbers,
303  * starting with @from.  The caller should normally be the one who
304  * allocated those numbers in the first place...
305  */
306 void unregister_chrdev_region(dev_t from, unsigned count)
307 {
308 	dev_t to = from + count;
309 	dev_t n, next;
310 
311 	for (n = from; n < to; n = next) {
312 		next = MKDEV(MAJOR(n)+1, 0);
313 		if (next > to)
314 			next = to;
315 		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
316 	}
317 }
318 
319 /**
320  * __unregister_chrdev - unregister and destroy a cdev
321  * @major: major device number
322  * @baseminor: first of the range of minor numbers
323  * @count: the number of minor numbers this cdev is occupying
324  * @name: name of this range of devices
325  *
326  * Unregister and destroy the cdev occupying the region described by
327  * @major, @baseminor and @count.  This function undoes what
328  * __register_chrdev() did.
329  */
330 void __unregister_chrdev(unsigned int major, unsigned int baseminor,
331 			 unsigned int count, const char *name)
332 {
333 	struct char_device_struct *cd;
334 
335 	cd = __unregister_chrdev_region(major, baseminor, count);
336 	if (cd && cd->cdev)
337 		cdev_del(cd->cdev);
338 	kfree(cd);
339 }
340 
341 static DEFINE_SPINLOCK(cdev_lock);
342 
343 static struct kobject *cdev_get(struct cdev *p)
344 {
345 	struct module *owner = p->owner;
346 	struct kobject *kobj;
347 
348 	if (owner && !try_module_get(owner))
349 		return NULL;
350 	kobj = kobject_get(&p->kobj);
351 	if (!kobj)
352 		module_put(owner);
353 	return kobj;
354 }
355 
356 void cdev_put(struct cdev *p)
357 {
358 	if (p) {
359 		struct module *owner = p->owner;
360 		kobject_put(&p->kobj);
361 		module_put(owner);
362 	}
363 }
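/*
 * cdev_get()/cdev_put() pin both the cdev's kobject and, when ->owner is set,
 * the module providing its file_operations, so the owning module cannot
 * normally be unloaded while one of its character devices is held open.
 */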
364 
365 /*
366  * Called every time a character special file is opened
367  */
368 static int chrdev_open(struct inode *inode, struct file *filp)
369 {
370 	const struct file_operations *fops;
371 	struct cdev *p;
372 	struct cdev *new = NULL;
373 	int ret = 0;
374 
375 	spin_lock(&cdev_lock);
376 	p = inode->i_cdev;
377 	if (!p) {
378 		struct kobject *kobj;
379 		int idx;
380 		spin_unlock(&cdev_lock);
381 		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
382 		if (!kobj)
383 			return -ENXIO;
384 		new = container_of(kobj, struct cdev, kobj);
385 		spin_lock(&cdev_lock);
386 		/* Check i_cdev again in case somebody beat us to it while
387 		   we dropped the lock. */
388 		p = inode->i_cdev;
389 		if (!p) {
390 			inode->i_cdev = p = new;
391 			list_add(&inode->i_devices, &p->list);
392 			new = NULL;
393 		} else if (!cdev_get(p))
394 			ret = -ENXIO;
395 	} else if (!cdev_get(p))
396 		ret = -ENXIO;
397 	spin_unlock(&cdev_lock);
398 	cdev_put(new);
399 	if (ret)
400 		return ret;
401 
402 	ret = -ENXIO;
403 	fops = fops_get(p->ops);
404 	if (!fops)
405 		goto out_cdev_put;
406 
407 	replace_fops(filp, fops);
408 	if (filp->f_op->open) {
409 		ret = filp->f_op->open(inode, filp);
410 		if (ret)
411 			goto out_cdev_put;
412 	}
413 
414 	return 0;
415 
416  out_cdev_put:
417 	cdev_put(p);
418 	return ret;
419 }
420 
421 void cd_forget(struct inode *inode)
422 {
423 	spin_lock(&cdev_lock);
424 	list_del_init(&inode->i_devices);
425 	inode->i_cdev = NULL;
426 	spin_unlock(&cdev_lock);
427 }
428 
429 static void cdev_purge(struct cdev *cdev)
430 {
431 	spin_lock(&cdev_lock);
432 	while (!list_empty(&cdev->list)) {
433 		struct inode *inode;
434 		inode = container_of(cdev->list.next, struct inode, i_devices);
435 		list_del_init(&inode->i_devices);
436 		inode->i_cdev = NULL;
437 	}
438 	spin_unlock(&cdev_lock);
439 }
440 
441 /*
442  * Dummy default file-operations: the only thing this provides
443  * is the open routine, which then fills in the correct operations
444  * depending on which special file was opened...
445  */
446 const struct file_operations def_chr_fops = {
447 	.open = chrdev_open,
448 	.llseek = noop_llseek,
449 };
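/*
 * init_special_inode() in fs/inode.c installs def_chr_fops as the initial
 * ->i_fop of every character special inode, so the first open() always goes
 * through chrdev_open() above, which then swaps in the driver's own
 * file_operations via replace_fops().
 */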
450 
451 static struct kobject *exact_match(dev_t dev, int *part, void *data)
452 {
453 	struct cdev *p = data;
454 	return &p->kobj;
455 }
456 
457 static int exact_lock(dev_t dev, void *data)
458 {
459 	struct cdev *p = data;
460 	return cdev_get(p) ? 0 : -1;
461 }
462 
463 /**
464  * cdev_add() - add a char device to the system
465  * @p: the cdev structure for the device
466  * @dev: the first device number for which this device is responsible
467  * @count: the number of consecutive minor numbers corresponding to this
468  *         device
469  *
470  * cdev_add() adds the device represented by @p to the system, making it
471  * live immediately.  A negative error code is returned on failure.
472  */
473 int cdev_add(struct cdev *p, dev_t dev, unsigned count)
474 {
475 	int error;
476 
477 	p->dev = dev;
478 	p->count = count;
479 
480 	error = kobj_map(cdev_map, dev, count, NULL,
481 			 exact_match, exact_lock, p);
482 	if (error)
483 		return error;
484 
485 	kobject_get(p->kobj.parent);
486 
487 	return 0;
488 }
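/*
 * Example of the usual cdev_init() + cdev_add() sequence (a minimal sketch;
 * foo_cdev, foo_fops, foo_devt and FOO_MINORS are hypothetical and would be
 * set up by the calling driver, e.g. via alloc_chrdev_region() above):
 *
 *	cdev_init(&foo_cdev, &foo_fops);
 *	foo_cdev.owner = THIS_MODULE;
 *	err = cdev_add(&foo_cdev, foo_devt, FOO_MINORS);
 *	if (err)
 *		goto out_unregister_region;
 *
 * Because the device is live as soon as cdev_add() returns, everything the
 * open() method depends on must be initialized before this call.
 */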
489 
490 static void cdev_unmap(dev_t dev, unsigned count)
491 {
492 	kobj_unmap(cdev_map, dev, count);
493 }
494 
495 /**
496  * cdev_del() - remove a cdev from the system
497  * @p: the cdev structure to be removed
498  *
499  * cdev_del() removes @p from the system, possibly freeing the structure
500  * itself.
501  */
502 void cdev_del(struct cdev *p)
503 {
504 	cdev_unmap(p->dev, p->count);
505 	kobject_put(&p->kobj);
506 }
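/*
 * Note that already-open files keep their own reference on the cdev (taken
 * via cdev_get() in chrdev_open() and dropped when the file is finally
 * closed), so cdev_del() does not make existing users go away; it only
 * prevents new lookups and drops the registration's reference.
 */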
507 
508 
509 static void cdev_default_release(struct kobject *kobj)
510 {
511 	struct cdev *p = container_of(kobj, struct cdev, kobj);
512 	struct kobject *parent = kobj->parent;
513 
514 	cdev_purge(p);
515 	kobject_put(parent);
516 }
517 
518 static void cdev_dynamic_release(struct kobject *kobj)
519 {
520 	struct cdev *p = container_of(kobj, struct cdev, kobj);
521 	struct kobject *parent = kobj->parent;
522 
523 	cdev_purge(p);
524 	kfree(p);
525 	kobject_put(parent);
526 }
527 
528 static struct kobj_type ktype_cdev_default = {
529 	.release	= cdev_default_release,
530 };
531 
532 static struct kobj_type ktype_cdev_dynamic = {
533 	.release	= cdev_dynamic_release,
534 };
535 
536 /**
537  * cdev_alloc() - allocate a cdev structure
538  *
539  * Allocates and returns a cdev structure, or NULL on failure.
540  */
541 struct cdev *cdev_alloc(void)
542 {
543 	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
544 	if (p) {
545 		INIT_LIST_HEAD(&p->list);
546 		kobject_init(&p->kobj, &ktype_cdev_dynamic);
547 	}
548 	return p;
549 }
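/*
 * A cdev obtained from cdev_alloc() uses ktype_cdev_dynamic and is therefore
 * kfree()d by its kobject release once the last reference goes away,
 * typically some time after cdev_del().  A cdev embedded in a larger driver
 * structure must be set up with cdev_init() below instead, which uses
 * ktype_cdev_default and leaves freeing the memory to its owner.
 */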
550 
551 /**
552  * cdev_init() - initialize a cdev structure
553  * @cdev: the structure to initialize
554  * @fops: the file_operations for this device
555  *
556  * Initializes @cdev, remembering @fops, making it ready to add to the
557  * system with cdev_add().
558  */
559 void cdev_init(struct cdev *cdev, const struct file_operations *fops)
560 {
561 	memset(cdev, 0, sizeof *cdev);
562 	INIT_LIST_HEAD(&cdev->list);
563 	kobject_init(&cdev->kobj, &ktype_cdev_default);
564 	cdev->ops = fops;
565 }
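/*
 * Example of embedding the cdev in a per-device structure and recovering it
 * in open() (a minimal sketch; struct foo_dev and foo_open() are
 * hypothetical):
 *
 *	struct foo_dev {
 *		struct cdev cdev;
 *		...
 *	};
 *
 *	static int foo_open(struct inode *inode, struct file *filp)
 *	{
 *		struct foo_dev *dev =
 *			container_of(inode->i_cdev, struct foo_dev, cdev);
 *
 *		filp->private_data = dev;
 *		return 0;
 *	}
 *
 * chrdev_open() stores the matched cdev in inode->i_cdev before calling the
 * driver's open(), which is what makes the container_of() above valid.
 */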
566 
567 static struct kobject *base_probe(dev_t dev, int *part, void *data)
568 {
569 	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
570 		/* Make old-style 2.4 aliases work */
571 		request_module("char-major-%d", MAJOR(dev));
572 	return NULL;
573 }
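/*
 * The "char-major-%d-%d" and "char-major-%d" strings above correspond to the
 * aliases that drivers export with MODULE_ALIAS_CHARDEV() and
 * MODULE_ALIAS_CHARDEV_MAJOR() from <linux/module.h>, so a module declaring
 * e.g. MODULE_ALIAS_CHARDEV_MAJOR(FOO_MAJOR) (FOO_MAJOR being the driver's
 * own constant) can be loaded on demand the first time one of its device
 * nodes is opened without a matching cdev registered.
 */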
574 
575 void __init chrdev_init(void)
576 {
577 	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
578 	if (bdi_init(&directly_mappable_cdev_bdi))
579 		panic("Failed to init directly mappable cdev bdi");
580 }
581 
582 
583 /* Let modules do char dev stuff */
584 EXPORT_SYMBOL(register_chrdev_region);
585 EXPORT_SYMBOL(unregister_chrdev_region);
586 EXPORT_SYMBOL(alloc_chrdev_region);
587 EXPORT_SYMBOL(cdev_init);
588 EXPORT_SYMBOL(cdev_alloc);
589 EXPORT_SYMBOL(cdev_del);
590 EXPORT_SYMBOL(cdev_add);
591 EXPORT_SYMBOL(__register_chrdev);
592 EXPORT_SYMBOL(__unregister_chrdev);
593 EXPORT_SYMBOL(directly_mappable_cdev_bdi);
594