/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/tty.h>

#include "internal.h"

/*
 * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
 * devices
 * - permits shared-mmap for read, write and/or exec
 * - does not permit private mmap in NOMMU mode (can't do COW)
 * - no readahead or I/O queue unplugging required
 */
struct backing_dev_info directly_mappable_cdev_bdi = {
	.name = "char",
	.capabilities	= (
#ifdef CONFIG_MMU
		/* permit private copies of the data to be taken */
		BDI_CAP_MAP_COPY |
#endif
		/* permit direct mmap, for read, write or exec */
		BDI_CAP_MAP_DIRECT |
		BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
};

static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index in the above */
static inline int major_to_index(int major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}
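
/*
 * Illustrative example: majors above the hash size share buckets.  Assuming
 * CHRDEV_MAJOR_HASH_SIZE is 255 (its usual value), major_to_index(42) == 42
 * while major_to_index(300) == 45, so major 300 lands on the same chain as
 * major 45 and is told apart by the ->major field when the chain is walked.
 */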

#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
		mutex_lock(&chrdevs_lock);
		for (cd = chrdevs[offset]; cd; cd = cd->next)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
		mutex_unlock(&chrdevs_lock);
	}
}
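
/*
 * Example: chrdev_show() backs the "Character devices:" half of
 * /proc/devices, so the hash table above is what produces lines such as
 *
 *	  1 mem
 *	  4 tty
 *	  5 /dev/tty
 *
 * The exact entries of course depend on which drivers have registered.
 */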

#endif /* CONFIG_PROC_FS */

/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate an unused major
 * and record it in the returned structure.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors under that major.
 *
 * Returns the new char_device_struct on success, or an ERR_PTR()-encoded
 * negative errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			   int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	/* temporary */
	if (major == 0) {
		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
			if (chrdevs[i] == NULL)
				break;
		}

		if (i == 0) {
			ret = -EBUSY;
			goto out;
		}
		major = i;
		ret = major;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strlcpy(cd->name, name, sizeof(cd->name));

	i = major_to_index(major);

	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major &&
		     (((*cp)->baseminor >= baseminor) ||
		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
			break;

	/* Check for overlapping minor ranges.  */
	if (*cp && (*cp)->major == major) {
		int old_min = (*cp)->baseminor;
		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
		int new_min = baseminor;
		int new_max = baseminor + minorct - 1;

		/* New driver overlaps from the left.  */
		if (new_max >= old_min && new_max <= old_max) {
			ret = -EBUSY;
			goto out;
		}

		/* New driver overlaps from the right.  */
		if (new_min <= old_max && new_min >= old_min) {
			ret = -EBUSY;
			goto out;
		}
	}

	cd->next = *cp;
	*cp = cd;
	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}
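
/*
 * Worked example of the overlap check above (hypothetical numbers): with
 * an entry already holding major 42, minors 0-15 (baseminor 0, minorct 16),
 * a request for minors 8-23 on the same major fails with -EBUSY because
 * new_min (8) falls inside old_min..old_max (0..15), while a request for
 * minors 16-31 passes both checks and is inserted after the existing entry.
 */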

static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}

/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
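
/*
 * Example usage (illustrative; FOO_* names are hypothetical): a driver with
 * a fixed, pre-assigned major would typically reserve its numbers at init
 * time with
 *
 *	dev_t first = MKDEV(FOO_MAJOR, 0);
 *	int err = register_chrdev_region(first, FOO_NR_DEVS, "foo");
 *	if (err)
 *		return err;
 *
 * and release them again with unregister_chrdev_region(first, FOO_NR_DEVS)
 * on module exit.
 */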

/**
 * alloc_chrdev_region() - register a range of char device numbers
 * @dev: output parameter for first assigned number
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: the name of the associated device or driver
 *
 * Allocates a range of char device numbers.  The major number will be
 * chosen dynamically, and returned (along with the first minor number)
 * in @dev.  Returns zero or a negative error code.
 */
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
			const char *name)
{
	struct char_device_struct *cd;
	cd = __register_chrdev_region(0, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	*dev = MKDEV(cd->major, cd->baseminor);
	return 0;
}
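
/*
 * Example usage (illustrative; foo_* names are hypothetical): most drivers
 * let the kernel pick the major dynamically:
 *
 *	dev_t devt;
 *	int err = alloc_chrdev_region(&devt, 0, FOO_NR_DEVS, "foo");
 *	if (err)
 *		return err;
 *	foo_major = MAJOR(devt);
 *
 * The allocated major can then be read back from /proc/devices, e.g. by a
 * udev rule or an mknod script.
 */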

/**
 * __register_chrdev() - create and register a cdev occupying a range of minors
 * @major: major device number or 0 for dynamic allocation
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: name of this range of devices
 * @fops: file operations associated with these devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 *
 * The name of this range has nothing to do with the name of the device in
 * /dev. It only helps to keep track of the different owners of devices. If
 * your module registers only one type of device it is fine to use e.g. the
 * name of the module here.
 */
int __register_chrdev(unsigned int major, unsigned int baseminor,
		      unsigned int count, const char *name,
		      const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
	if (err)
		goto out;

	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, baseminor, count));
	return err;
}
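
/*
 * Note: this is normally reached through the old-style register_chrdev()
 * wrapper in <linux/fs.h>, which passes baseminor 0 and a count of 256.
 * Example usage of that wrapper (illustrative; foo_* names are hypothetical):
 *
 *	foo_major = register_chrdev(0, "foo", &foo_fops);
 *	if (foo_major < 0)
 *		return foo_major;
 *
 * Drivers that want finer control over the minor range use
 * alloc_chrdev_region() and cdev_add() directly instead.
 */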

/**
 * unregister_chrdev_region() - return a range of device numbers
 * @from: the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * This function will unregister a range of @count device numbers,
 * starting with @from.  The caller should normally be the one who
 * allocated those numbers in the first place...
 */
void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}
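
/*
 * This is the counterpart of both register_chrdev_region() and
 * alloc_chrdev_region(), so a typical module exit path looks roughly like
 * (illustrative; foo_* names are hypothetical):
 *
 *	cdev_del(&foo_cdev);
 *	unregister_chrdev_region(MKDEV(foo_major, 0), FOO_NR_DEVS);
 */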

/**
 * __unregister_chrdev() - unregister and destroy a cdev
 * @major: major device number
 * @baseminor: first of the range of minor numbers
 * @count: the number of minor numbers this cdev is occupying
 * @name: name of this range of devices
 *
 * Unregister and destroy the cdev occupying the region described by
 * @major, @baseminor and @count.  This function undoes what
 * __register_chrdev() did.
 */
void __unregister_chrdev(unsigned int major, unsigned int baseminor,
			 unsigned int count, const char *name)
{
	struct char_device_struct *cd;

	cd = __unregister_chrdev_region(major, baseminor, count);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
}

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}

void cdev_put(struct cdev *p)
{
	if (p) {
		struct module *owner = p->owner;
		kobject_put(&p->kobj);
		module_put(owner);
	}
}

/*
 * Called every time a character special file is opened
 */
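/*
 * On the first open of a given character inode the cdev is looked up in
 * cdev_map by inode->i_rdev, cached in inode->i_cdev and the inode is linked
 * onto the cdev's ->list so cdev_purge()/cd_forget() can find it later.
 * Subsequent opens just take an extra reference with cdev_get().  In both
 * cases filp->f_op is then switched from def_chr_fops to the driver's own
 * file_operations before its ->open() is called.
 */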
static int chrdev_open(struct inode *inode, struct file *filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* Check i_cdev again in case somebody beat us to it while
		   we dropped the lock. */
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);
	if (ret)
		return ret;

	ret = -ENXIO;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op)
		goto out_cdev_put;

	if (filp->f_op->open) {
		ret = filp->f_op->open(inode, filp);
		if (ret)
			goto out_cdev_put;
	}

	return 0;

 out_cdev_put:
	cdev_put(p);
	return ret;
}

int cdev_index(struct inode *inode)
{
	int idx;
	struct kobject *kobj;

	kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
	if (!kobj)
		return -1;
	kobject_put(kobj);
	return idx;
}

void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing they provide is the open
 * routine, which then fills in the correct operations for the particular
 * special file being opened...
 */
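/*
 * These are installed on character special inodes by init_special_inode()
 * in fs/inode.c, so any open() of a character device node first lands in
 * chrdev_open() above, which then replaces filp->f_op with the registered
 * driver's operations.
 */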
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
};

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}

/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	p->dev = dev;
	p->count = count;
	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}
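
/*
 * Typical usage (illustrative; foo_* names are hypothetical): reserve the
 * numbers first and only then make the cdev live, since it can be opened as
 * soon as cdev_add() returns:
 *
 *	static struct cdev foo_cdev;
 *
 *	err = alloc_chrdev_region(&devt, 0, 1, "foo");
 *	if (err)
 *		return err;
 *	cdev_init(&foo_cdev, &foo_fops);
 *	foo_cdev.owner = THIS_MODULE;
 *	err = cdev_add(&foo_cdev, devt, 1);
 *	if (err)
 *		unregister_chrdev_region(devt, 1);
 */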

static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}

/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}
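
/*
 * Note that cdev_del() only removes the mapping and drops this reference;
 * already-open files keep the driver's file_operations until they are
 * released.  If the cdev came from cdev_alloc(), the final kobject_put()
 * frees it through cdev_dynamic_release() below, so it must not be touched
 * afterwards.
 */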


static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
	kfree(p);
}

static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};

/**
 * cdev_alloc() - allocate a cdev structure
 *
 * Allocates and returns a cdev structure, or NULL on failure.
 */
struct cdev *cdev_alloc(void)
{
	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		INIT_LIST_HEAD(&p->list);
		kobject_init(&p->kobj, &ktype_cdev_dynamic);
	}
	return p;
}
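
/*
 * Illustrative usage (foo_* names are hypothetical): cdev_alloc() is the
 * dynamic counterpart of cdev_init(), used when the cdev is not embedded in
 * a larger structure; this is the path __register_chrdev() itself takes:
 *
 *	struct cdev *c = cdev_alloc();
 *	if (!c)
 *		return -ENOMEM;
 *	c->ops = &foo_fops;
 *	c->owner = THIS_MODULE;
 *	err = cdev_add(c, devt, 1);
 *	if (err)
 *		kobject_put(&c->kobj);	/* frees c via cdev_dynamic_release() */
 */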

/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	kobject_init(&cdev->kobj, &ktype_cdev_default);
	cdev->ops = fops;
}
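
/*
 * Illustrative pattern (foo_* names are hypothetical): cdev_init() is meant
 * for a cdev embedded in a per-device structure, which the driver can then
 * recover in its open() via inode->i_cdev:
 *
 *	struct foo_device {
 *		struct cdev cdev;
 *		...
 *	};
 *
 *	static int foo_open(struct inode *inode, struct file *filp)
 *	{
 *		struct foo_device *dev =
 *			container_of(inode->i_cdev, struct foo_device, cdev);
 *		filp->private_data = dev;
 *		return 0;
 *	}
 *
 * with cdev_init(&dev->cdev, &foo_fops) and cdev_add() done at probe time.
 */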

static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}
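
/*
 * base_probe() is the default probe installed for the whole map below, so it
 * runs when nothing is registered for a device number yet; it asks modprobe
 * for a "char-major-MAJOR-MINOR" alias so the owning module can be loaded on
 * demand.  Drivers opt in by declaring the alias, e.g. (FOO_MAJOR being a
 * hypothetical constant)
 *
 *	MODULE_ALIAS_CHARDEV_MAJOR(FOO_MAJOR);
 *
 * or MODULE_ALIAS_CHARDEV(FOO_MAJOR, minor) for a single minor.
 */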

void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
	bdi_init(&directly_mappable_cdev_bdi);
}


/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(cdev_index);
EXPORT_SYMBOL(__register_chrdev);
EXPORT_SYMBOL(__unregister_chrdev);
EXPORT_SYMBOL(directly_mappable_cdev_bdi);