xref: /linux/fs/char_dev.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  *  linux/fs/char_dev.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6 
7 #include <linux/init.h>
8 #include <linux/fs.h>
9 #include <linux/kdev_t.h>
10 #include <linux/slab.h>
11 #include <linux/string.h>
12 
13 #include <linux/major.h>
14 #include <linux/errno.h>
15 #include <linux/module.h>
16 #include <linux/seq_file.h>
17 
18 #include <linux/kobject.h>
19 #include <linux/kobj_map.h>
20 #include <linux/cdev.h>
21 #include <linux/mutex.h>
22 #include <linux/backing-dev.h>
23 #include <linux/tty.h>
24 
25 #include "internal.h"
26 
27 static struct kobj_map *cdev_map;
28 
29 static DEFINE_MUTEX(chrdevs_lock);
30 
31 static struct char_device_struct {
32 	struct char_device_struct *next;
33 	unsigned int major;
34 	unsigned int baseminor;
35 	int minorct;
36 	char name[64];
37 	struct cdev *cdev;		/* will die */
38 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
39 
40 /* index into the chrdevs[] hash table above */
41 static inline int major_to_index(unsigned major)
42 {
43 	return major % CHRDEV_MAJOR_HASH_SIZE;
44 }
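/*
 * Worked example (not part of the original file): assuming the usual
 * CHRDEV_MAJOR_HASH_SIZE of 255, majors 0..254 each map to their own
 * bucket, while larger majors share one, e.g. 300 % 255 == 45, so major
 * 300 chains into the same bucket as major 45.  That is why the
 * registration code below still compares cd->major within a chain.
 */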
45 
46 #ifdef CONFIG_PROC_FS
47 
48 void chrdev_show(struct seq_file *f, off_t offset)
49 {
50 	struct char_device_struct *cd;
51 
52 	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
53 		mutex_lock(&chrdevs_lock);
54 		for (cd = chrdevs[offset]; cd; cd = cd->next)
55 			seq_printf(f, "%3d %s\n", cd->major, cd->name);
56 		mutex_unlock(&chrdevs_lock);
57 	}
58 }
59 
60 #endif /* CONFIG_PROC_FS */
61 
62 /*
63  * Register a single major with a specified minor range.
64  *
65  * If major == 0 this function will dynamically allocate an unused major and
66  * register the requested minor range under it.
67  *
68  * If major > 0 this function will attempt to reserve the passed range of
69  * minors under that major.
70  *
71  * Returns the new char_device_struct on success, ERR_PTR(-errno) on failure.
72  */
73 static struct char_device_struct *
74 __register_chrdev_region(unsigned int major, unsigned int baseminor,
75 			   int minorct, const char *name)
76 {
77 	struct char_device_struct *cd, **cp;
78 	int ret = 0;
79 	int i;
80 
81 	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
82 	if (cd == NULL)
83 		return ERR_PTR(-ENOMEM);
84 
85 	mutex_lock(&chrdevs_lock);
86 
87 	/* temporary */
88 	if (major == 0) {
89 		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
90 			if (chrdevs[i] == NULL)
91 				break;
92 		}
93 
94 		if (i == 0) {
95 			ret = -EBUSY;
96 			goto out;
97 		}
98 		major = i;
99 	}
100 
101 	cd->major = major;
102 	cd->baseminor = baseminor;
103 	cd->minorct = minorct;
104 	strlcpy(cd->name, name, sizeof(cd->name));
105 
106 	i = major_to_index(major);
107 
108 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
109 		if ((*cp)->major > major ||
110 		    ((*cp)->major == major &&
111 		     (((*cp)->baseminor >= baseminor) ||
112 		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
113 			break;
114 
115 	/* Check for overlapping minor ranges.  */
116 	if (*cp && (*cp)->major == major) {
117 		int old_min = (*cp)->baseminor;
118 		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
119 		int new_min = baseminor;
120 		int new_max = baseminor + minorct - 1;
121 
122 		/* New driver overlaps from the left.  */
123 		if (new_max >= old_min && new_max <= old_max) {
124 			ret = -EBUSY;
125 			goto out;
126 		}
127 
128 		/* New driver overlaps from the right.  */
129 		if (new_min <= old_max && new_min >= old_min) {
130 			ret = -EBUSY;
131 			goto out;
132 		}
133 	}
134 
135 	cd->next = *cp;
136 	*cp = cd;
137 	mutex_unlock(&chrdevs_lock);
138 	return cd;
139 out:
140 	mutex_unlock(&chrdevs_lock);
141 	kfree(cd);
142 	return ERR_PTR(ret);
143 }
144 
145 static struct char_device_struct *
146 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
147 {
148 	struct char_device_struct *cd = NULL, **cp;
149 	int i = major_to_index(major);
150 
151 	mutex_lock(&chrdevs_lock);
152 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
153 		if ((*cp)->major == major &&
154 		    (*cp)->baseminor == baseminor &&
155 		    (*cp)->minorct == minorct)
156 			break;
157 	if (*cp) {
158 		cd = *cp;
159 		*cp = cd->next;
160 	}
161 	mutex_unlock(&chrdevs_lock);
162 	return cd;
163 }
164 
165 /**
166  * register_chrdev_region() - register a range of device numbers
167  * @from: the first in the desired range of device numbers; must include
168  *        the major number.
169  * @count: the number of consecutive device numbers required
170  * @name: the name of the device or driver.
171  *
172  * Return value is zero on success, a negative error code on failure.
173  */
174 int register_chrdev_region(dev_t from, unsigned count, const char *name)
175 {
176 	struct char_device_struct *cd;
177 	dev_t to = from + count;
178 	dev_t n, next;
179 
180 	for (n = from; n < to; n = next) {
181 		next = MKDEV(MAJOR(n)+1, 0);
182 		if (next > to)
183 			next = to;
184 		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
185 			       next - n, name);
186 		if (IS_ERR(cd))
187 			goto fail;
188 	}
189 	return 0;
190 fail:
191 	to = n;
192 	for (n = from; n < to; n = next) {
193 		next = MKDEV(MAJOR(n)+1, 0);
194 		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
195 	}
196 	return PTR_ERR(cd);
197 }
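/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * that owns a fixed major reserves its whole minor range up front.
 * FOO_MAJOR and FOO_NR_DEVS are made-up names.
 *
 *	err = register_chrdev_region(MKDEV(FOO_MAJOR, 0), FOO_NR_DEVS, "foo");
 *	if (err)
 *		return err;
 */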
198 
199 /**
200  * alloc_chrdev_region() - register a range of char device numbers
201  * @dev: output parameter for first assigned number
202  * @baseminor: first of the requested range of minor numbers
203  * @count: the number of minor numbers required
204  * @name: the name of the associated device or driver
205  *
206  * Allocates a range of char device numbers.  The major number will be
207  * chosen dynamically, and returned (along with the first minor number)
208  * in @dev.  Returns zero or a negative error code.
209  */
210 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
211 			const char *name)
212 {
213 	struct char_device_struct *cd;
214 	cd = __register_chrdev_region(0, baseminor, count, name);
215 	if (IS_ERR(cd))
216 		return PTR_ERR(cd);
217 	*dev = MKDEV(cd->major, cd->baseminor);
218 	return 0;
219 }
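/*
 * Usage sketch (hypothetical, not part of this file): with no fixed major
 * assigned, let the kernel pick one and read it back from the returned
 * dev_t.  foo_devt and FOO_NR_DEVS are made-up names.
 *
 *	static dev_t foo_devt;
 *
 *	err = alloc_chrdev_region(&foo_devt, 0, FOO_NR_DEVS, "foo");
 *	if (err)
 *		return err;
 *	pr_info("foo: using major %d\n", MAJOR(foo_devt));
 */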
220 
221 /**
222  * __register_chrdev() - create and register a cdev occupying a range of minors
223  * @major: major device number or 0 for dynamic allocation
224  * @baseminor: first of the requested range of minor numbers
225  * @count: the number of minor numbers required
226  * @name: name of this range of devices
227  * @fops: file operations associated with these devices
228  *
229  * If @major == 0 this function will dynamically allocate a major and return
230  * its number.
231  *
232  * If @major > 0 this function will attempt to reserve a device with the given
233  * major number and will return zero on success.
234  *
235  * Returns a -ve errno on failure.
236  *
237  * The name of this device has nothing to do with the name of the device in
238  * /dev. It only helps to keep track of the different owners of devices. If
239  * your module has only one type of device it's ok to use e.g. the name
240  * of the module here.
241  */
242 int __register_chrdev(unsigned int major, unsigned int baseminor,
243 		      unsigned int count, const char *name,
244 		      const struct file_operations *fops)
245 {
246 	struct char_device_struct *cd;
247 	struct cdev *cdev;
248 	int err = -ENOMEM;
249 
250 	cd = __register_chrdev_region(major, baseminor, count, name);
251 	if (IS_ERR(cd))
252 		return PTR_ERR(cd);
253 
254 	cdev = cdev_alloc();
255 	if (!cdev)
256 		goto out2;
257 
258 	cdev->owner = fops->owner;
259 	cdev->ops = fops;
260 	kobject_set_name(&cdev->kobj, "%s", name);
261 
262 	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
263 	if (err)
264 		goto out;
265 
266 	cd->cdev = cdev;
267 
268 	return major ? 0 : cd->major;
269 out:
270 	kobject_put(&cdev->kobj);
271 out2:
272 	kfree(__unregister_chrdev_region(cd->major, baseminor, count));
273 	return err;
274 }
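/*
 * Usage sketch (hypothetical, not part of this file): most drivers reach
 * this through the register_chrdev() wrapper in <linux/fs.h>, which covers
 * minors 0..255 and builds the cdev internally.  foo_fops is a made-up
 * name; passing 0 asks for a dynamic major, which is returned.
 *
 *	int major = register_chrdev(0, "foo", &foo_fops);
 *	if (major < 0)
 *		return major;
 */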
275 
276 /**
277  * unregister_chrdev_region() - unregister a range of device numbers
278  * @from: the first in the range of numbers to unregister
279  * @count: the number of device numbers to unregister
280  *
281  * This function will unregister a range of @count device numbers,
282  * starting with @from.  The caller should normally be the one who
283  * allocated those numbers in the first place...
284  */
285 void unregister_chrdev_region(dev_t from, unsigned count)
286 {
287 	dev_t to = from + count;
288 	dev_t n, next;
289 
290 	for (n = from; n < to; n = next) {
291 		next = MKDEV(MAJOR(n)+1, 0);
292 		if (next > to)
293 			next = to;
294 		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
295 	}
296 }
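/*
 * Usage sketch (hypothetical, not part of this file): module unload mirrors
 * the allocation performed at init time.  foo_devt and FOO_NR_DEVS are
 * made-up names.
 *
 *	unregister_chrdev_region(foo_devt, FOO_NR_DEVS);
 */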
297 
298 /**
299  * __unregister_chrdev() - unregister and destroy a cdev
300  * @major: major device number
301  * @baseminor: first of the range of minor numbers
302  * @count: the number of minor numbers this cdev is occupying
303  * @name: name of this range of devices
304  *
305  * Unregister and destroy the cdev occupying the region described by
306  * @major, @baseminor and @count.  This function undoes what
307  * __register_chrdev() did.
308  */
309 void __unregister_chrdev(unsigned int major, unsigned int baseminor,
310 			 unsigned int count, const char *name)
311 {
312 	struct char_device_struct *cd;
313 
314 	cd = __unregister_chrdev_region(major, baseminor, count);
315 	if (cd && cd->cdev)
316 		cdev_del(cd->cdev);
317 	kfree(cd);
318 }
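/*
 * Usage sketch (hypothetical, not part of this file): the common entry
 * point is the unregister_chrdev() wrapper in <linux/fs.h>, which undoes a
 * matching register_chrdev() call for the same major and name.
 *
 *	unregister_chrdev(major, "foo");
 */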
319 
320 static DEFINE_SPINLOCK(cdev_lock);
321 
322 static struct kobject *cdev_get(struct cdev *p)
323 {
324 	struct module *owner = p->owner;
325 	struct kobject *kobj;
326 
327 	if (owner && !try_module_get(owner))
328 		return NULL;
329 	kobj = kobject_get(&p->kobj);
330 	if (!kobj)
331 		module_put(owner);
332 	return kobj;
333 }
334 
335 void cdev_put(struct cdev *p)
336 {
337 	if (p) {
338 		struct module *owner = p->owner;
339 		kobject_put(&p->kobj);
340 		module_put(owner);
341 	}
342 }
343 
344 /*
345  * Called every time a character special file is opened
346  */
347 static int chrdev_open(struct inode *inode, struct file *filp)
348 {
349 	const struct file_operations *fops;
350 	struct cdev *p;
351 	struct cdev *new = NULL;
352 	int ret = 0;
353 
354 	spin_lock(&cdev_lock);
355 	p = inode->i_cdev;
356 	if (!p) {
357 		struct kobject *kobj;
358 		int idx;
359 		spin_unlock(&cdev_lock);
360 		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
361 		if (!kobj)
362 			return -ENXIO;
363 		new = container_of(kobj, struct cdev, kobj);
364 		spin_lock(&cdev_lock);
365 		/* Check i_cdev again in case somebody beat us to it while
366 		   we dropped the lock. */
367 		p = inode->i_cdev;
368 		if (!p) {
369 			inode->i_cdev = p = new;
370 			list_add(&inode->i_devices, &p->list);
371 			new = NULL;
372 		} else if (!cdev_get(p))
373 			ret = -ENXIO;
374 	} else if (!cdev_get(p))
375 		ret = -ENXIO;
376 	spin_unlock(&cdev_lock);
377 	cdev_put(new);
378 	if (ret)
379 		return ret;
380 
381 	ret = -ENXIO;
382 	fops = fops_get(p->ops);
383 	if (!fops)
384 		goto out_cdev_put;
385 
386 	replace_fops(filp, fops);
387 	if (filp->f_op->open) {
388 		ret = filp->f_op->open(inode, filp);
389 		if (ret)
390 			goto out_cdev_put;
391 	}
392 
393 	return 0;
394 
395  out_cdev_put:
396 	cdev_put(p);
397 	return ret;
398 }
399 
400 void cd_forget(struct inode *inode)
401 {
402 	spin_lock(&cdev_lock);
403 	list_del_init(&inode->i_devices);
404 	inode->i_cdev = NULL;
405 	spin_unlock(&cdev_lock);
406 }
407 
408 static void cdev_purge(struct cdev *cdev)
409 {
410 	spin_lock(&cdev_lock);
411 	while (!list_empty(&cdev->list)) {
412 		struct inode *inode;
413 		inode = container_of(cdev->list.next, struct inode, i_devices);
414 		list_del_init(&inode->i_devices);
415 		inode->i_cdev = NULL;
416 	}
417 	spin_unlock(&cdev_lock);
418 }
419 
420 /*
421  * Dummy default file-operations: the only thing they provide
422  * is the open routine, which then fills in the correct operations
423  * depending on the particular special file being opened...
424  */
425 const struct file_operations def_chr_fops = {
426 	.open = chrdev_open,
427 	.llseek = noop_llseek,
428 };
429 
430 static struct kobject *exact_match(dev_t dev, int *part, void *data)
431 {
432 	struct cdev *p = data;
433 	return &p->kobj;
434 }
435 
436 static int exact_lock(dev_t dev, void *data)
437 {
438 	struct cdev *p = data;
439 	return cdev_get(p) ? 0 : -1;
440 }
441 
442 /**
443  * cdev_add() - add a char device to the system
444  * @p: the cdev structure for the device
445  * @dev: the first device number for which this device is responsible
446  * @count: the number of consecutive minor numbers corresponding to this
447  *         device
448  *
449  * cdev_add() adds the device represented by @p to the system, making it
450  * live immediately.  A negative error code is returned on failure.
451  */
452 int cdev_add(struct cdev *p, dev_t dev, unsigned count)
453 {
454 	int error;
455 
456 	p->dev = dev;
457 	p->count = count;
458 
459 	error = kobj_map(cdev_map, dev, count, NULL,
460 			 exact_match, exact_lock, p);
461 	if (error)
462 		return error;
463 
464 	kobject_get(p->kobj.parent);
465 
466 	return 0;
467 }
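/*
 * Usage sketch (hypothetical, not part of this file): the classic pattern
 * pairs a reserved device-number range with an embedded, statically
 * initialized cdev.  foo_cdev, foo_fops, foo_devt and FOO_NR_DEVS are
 * made-up names.
 *
 *	cdev_init(&foo_cdev, &foo_fops);
 *	foo_cdev.owner = THIS_MODULE;
 *	err = cdev_add(&foo_cdev, foo_devt, FOO_NR_DEVS);
 *	if (err)
 *		unregister_chrdev_region(foo_devt, FOO_NR_DEVS);
 */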
468 
469 static void cdev_unmap(dev_t dev, unsigned count)
470 {
471 	kobj_unmap(cdev_map, dev, count);
472 }
473 
474 /**
475  * cdev_del() - remove a cdev from the system
476  * @p: the cdev structure to be removed
477  *
478  * cdev_del() removes @p from the system, possibly freeing the structure
479  * itself once the last reference to its kobject has been dropped.
480  */
481 void cdev_del(struct cdev *p)
482 {
483 	cdev_unmap(p->dev, p->count);
484 	kobject_put(&p->kobj);
485 }
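/*
 * Usage sketch (hypothetical, not part of this file): teardown runs in the
 * reverse order of setup, removing the cdev before releasing the numbers.
 *
 *	cdev_del(&foo_cdev);
 *	unregister_chrdev_region(foo_devt, FOO_NR_DEVS);
 */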
486 
487 
488 static void cdev_default_release(struct kobject *kobj)
489 {
490 	struct cdev *p = container_of(kobj, struct cdev, kobj);
491 	struct kobject *parent = kobj->parent;
492 
493 	cdev_purge(p);
494 	kobject_put(parent);
495 }
496 
497 static void cdev_dynamic_release(struct kobject *kobj)
498 {
499 	struct cdev *p = container_of(kobj, struct cdev, kobj);
500 	struct kobject *parent = kobj->parent;
501 
502 	cdev_purge(p);
503 	kfree(p);
504 	kobject_put(parent);
505 }
506 
507 static struct kobj_type ktype_cdev_default = {
508 	.release	= cdev_default_release,
509 };
510 
511 static struct kobj_type ktype_cdev_dynamic = {
512 	.release	= cdev_dynamic_release,
513 };
514 
515 /**
516  * cdev_alloc() - allocate a cdev structure
517  *
518  * Allocates and returns a cdev structure, or NULL on failure.
519  */
520 struct cdev *cdev_alloc(void)
521 {
522 	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
523 	if (p) {
524 		INIT_LIST_HEAD(&p->list);
525 		kobject_init(&p->kobj, &ktype_cdev_dynamic);
526 	}
527 	return p;
528 }
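/*
 * Usage sketch (hypothetical, not part of this file): cdev_alloc() suits
 * callers that do not embed the cdev in a larger structure; the dynamic
 * ktype above kfree()s it when the last kobject reference is dropped.
 * foo_fops and foo_devt are made-up names.
 *
 *	struct cdev *c = cdev_alloc();
 *	if (!c)
 *		return -ENOMEM;
 *	c->owner = THIS_MODULE;
 *	c->ops = &foo_fops;
 *	err = cdev_add(c, foo_devt, 1);
 *	if (err)
 *		kobject_put(&c->kobj);
 */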
529 
530 /**
531  * cdev_init() - initialize a cdev structure
532  * @cdev: the structure to initialize
533  * @fops: the file_operations for this device
534  *
535  * Initializes @cdev, remembering @fops, making it ready to add to the
536  * system with cdev_add().
537  */
538 void cdev_init(struct cdev *cdev, const struct file_operations *fops)
539 {
540 	memset(cdev, 0, sizeof *cdev);
541 	INIT_LIST_HEAD(&cdev->list);
542 	kobject_init(&cdev->kobj, &ktype_cdev_default);
543 	cdev->ops = fops;
544 }
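/*
 * Usage sketch (hypothetical, not part of this file): cdev_init() is meant
 * for a cdev embedded in a per-device structure, which open() can recover
 * from inode->i_cdev after chrdev_open() has cached it there.  struct
 * foo_dev and foo_open() are made-up names.
 *
 *	struct foo_dev {
 *		struct cdev cdev;
 *		int state;
 *	};
 *
 *	static int foo_open(struct inode *inode, struct file *filp)
 *	{
 *		struct foo_dev *d = container_of(inode->i_cdev,
 *						 struct foo_dev, cdev);
 *		filp->private_data = d;
 *		return 0;
 *	}
 */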
545 
546 static struct kobject *base_probe(dev_t dev, int *part, void *data)
547 {
548 	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
549 		/* Make old-style 2.4 aliases work */
550 		request_module("char-major-%d", MAJOR(dev));
551 	return NULL;
552 }
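/*
 * Sketch (hypothetical, not part of this file): the request_module() calls
 * above match aliases that a driver module can declare, so opening a device
 * node whose major is not yet registered can load the module on demand:
 *
 *	MODULE_ALIAS_CHARDEV_MAJOR(FOO_MAJOR);
 *
 * which declares an alias of the form "char-major-<major>-*".
 */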
553 
554 void __init chrdev_init(void)
555 {
556 	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
557 }
558 
559 
560 /* Let modules do char dev stuff */
561 EXPORT_SYMBOL(register_chrdev_region);
562 EXPORT_SYMBOL(unregister_chrdev_region);
563 EXPORT_SYMBOL(alloc_chrdev_region);
564 EXPORT_SYMBOL(cdev_init);
565 EXPORT_SYMBOL(cdev_alloc);
566 EXPORT_SYMBOL(cdev_del);
567 EXPORT_SYMBOL(cdev_add);
568 EXPORT_SYMBOL(__register_chrdev);
569 EXPORT_SYMBOL(__unregister_chrdev);
570