xref: /linux/fs/char_dev.c (revision cd354f1ae75e6466a7e31b727faede57a1f89ca5)
1 /*
2  *  linux/fs/char_dev.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6 
7 #include <linux/init.h>
8 #include <linux/fs.h>
9 #include <linux/slab.h>
10 #include <linux/string.h>
11 
12 #include <linux/major.h>
13 #include <linux/errno.h>
14 #include <linux/module.h>
15 #include <linux/smp_lock.h>
16 #include <linux/seq_file.h>
17 
18 #include <linux/kobject.h>
19 #include <linux/kobj_map.h>
20 #include <linux/cdev.h>
21 #include <linux/mutex.h>
22 #include <linux/backing-dev.h>
23 
24 #ifdef CONFIG_KMOD
25 #include <linux/kmod.h>
26 #endif
27 #include "internal.h"
28 
/*
 * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
 * devices
 * - permits shared-mmap for read, write and/or exec
 * - does not permit private mmap in NOMMU mode (can't do COW)
 * - no readahead or I/O queue unplugging required
 *
 * Exported (see EXPORT_SYMBOL at the bottom of this file) so drivers of
 * such devices can point their backing_dev_info at it.
 */
struct backing_dev_info directly_mappable_cdev_bdi = {
	.capabilities	= (
#ifdef CONFIG_MMU
		/* permit private copies of the data to be taken */
		BDI_CAP_MAP_COPY |
#endif
		/* permit direct mmap, for read, write or exec */
		BDI_CAP_MAP_DIRECT |
		BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
};
46 
/* dev_t -> cdev lookup map; entries are installed by cdev_add() */
static struct kobj_map *cdev_map;

/* protects the chrdevs[] hash table below */
static DEFINE_MUTEX(chrdevs_lock);
50 
/*
 * One registered (major, baseminor .. baseminor+minorct-1) range of char
 * device numbers.  Entries are hashed by major into chrdevs[] and each
 * hash chain is kept sorted (see __register_chrdev_region()).
 */
static struct char_device_struct {
	struct char_device_struct *next;	/* next entry in the hash chain */
	unsigned int major;			/* registered major number */
	unsigned int baseminor;			/* first minor of the range */
	int minorct;				/* number of minors in the range */
	char name[64];				/* owner name, shown in /proc/devices */
	struct file_operations *fops;
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
60 
61 /* index in the above */
62 static inline int major_to_index(int major)
63 {
64 	return major % CHRDEV_MAJOR_HASH_SIZE;
65 }
66 
67 #ifdef CONFIG_PROC_FS
68 
69 void chrdev_show(struct seq_file *f, off_t offset)
70 {
71 	struct char_device_struct *cd;
72 
73 	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
74 		mutex_lock(&chrdevs_lock);
75 		for (cd = chrdevs[offset]; cd; cd = cd->next)
76 			seq_printf(f, "%3d %s\n", cd->major, cd->name);
77 		mutex_unlock(&chrdevs_lock);
78 	}
79 }
80 
81 #endif /* CONFIG_PROC_FS */
82 
83 /*
84  * Register a single major with a specified minor range.
85  *
86  * If major == 0 this functions will dynamically allocate a major and return
87  * its number.
88  *
89  * If major > 0 this function will attempt to reserve the passed range of
90  * minors and will return zero on success.
91  *
92  * Returns a -ve errno on failure.
93  */
94 static struct char_device_struct *
95 __register_chrdev_region(unsigned int major, unsigned int baseminor,
96 			   int minorct, const char *name)
97 {
98 	struct char_device_struct *cd, **cp;
99 	int ret = 0;
100 	int i;
101 
102 	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
103 	if (cd == NULL)
104 		return ERR_PTR(-ENOMEM);
105 
106 	mutex_lock(&chrdevs_lock);
107 
108 	/* temporary */
109 	if (major == 0) {
110 		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
111 			/*
112 			 * Disallow the LANANA-assigned LOCAL/EXPERIMENTAL
113 			 * majors
114 			 */
115 			if ((60 <= i && i <= 63) || (120 <= i && i <= 127) ||
116 					(240 <= i && i <= 254))
117 				continue;
118 			if (chrdevs[i] == NULL)
119 				break;
120 		}
121 
122 		if (i == 0) {
123 			ret = -EBUSY;
124 			goto out;
125 		}
126 		major = i;
127 		ret = major;
128 	}
129 
130 	cd->major = major;
131 	cd->baseminor = baseminor;
132 	cd->minorct = minorct;
133 	strncpy(cd->name,name, 64);
134 
135 	i = major_to_index(major);
136 
137 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
138 		if ((*cp)->major > major ||
139 		    ((*cp)->major == major &&
140 		     (((*cp)->baseminor >= baseminor) ||
141 		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
142 			break;
143 
144 	/* Check for overlapping minor ranges.  */
145 	if (*cp && (*cp)->major == major) {
146 		int old_min = (*cp)->baseminor;
147 		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
148 		int new_min = baseminor;
149 		int new_max = baseminor + minorct - 1;
150 
151 		/* New driver overlaps from the left.  */
152 		if (new_max >= old_min && new_max <= old_max) {
153 			ret = -EBUSY;
154 			goto out;
155 		}
156 
157 		/* New driver overlaps from the right.  */
158 		if (new_min <= old_max && new_min >= old_min) {
159 			ret = -EBUSY;
160 			goto out;
161 		}
162 	}
163 
164 	cd->next = *cp;
165 	*cp = cd;
166 	mutex_unlock(&chrdevs_lock);
167 	return cd;
168 out:
169 	mutex_unlock(&chrdevs_lock);
170 	kfree(cd);
171 	return ERR_PTR(ret);
172 }
173 
174 static struct char_device_struct *
175 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
176 {
177 	struct char_device_struct *cd = NULL, **cp;
178 	int i = major_to_index(major);
179 
180 	mutex_lock(&chrdevs_lock);
181 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
182 		if ((*cp)->major == major &&
183 		    (*cp)->baseminor == baseminor &&
184 		    (*cp)->minorct == minorct)
185 			break;
186 	if (*cp) {
187 		cd = *cp;
188 		*cp = cd->next;
189 	}
190 	mutex_unlock(&chrdevs_lock);
191 	return cd;
192 }
193 
/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	/*
	 * __register_chrdev_region() handles one major at a time, so walk
	 * [from, to) in per-major chunks; each iteration covers up to the
	 * start of the next major.
	 */
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;	/* clamp the final (partial) chunk */
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	/*
	 * Unwind the chunks registered so far.  All of them were full
	 * majors (the clamp above only applies to the very last chunk),
	 * so next - n matches what was registered.
	 */
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
227 
228 /**
229  * alloc_chrdev_region() - register a range of char device numbers
230  * @dev: output parameter for first assigned number
231  * @baseminor: first of the requested range of minor numbers
232  * @count: the number of minor numbers required
233  * @name: the name of the associated device or driver
234  *
235  * Allocates a range of char device numbers.  The major number will be
236  * chosen dynamically, and returned (along with the first minor number)
237  * in @dev.  Returns zero or a negative error code.
238  */
239 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
240 			const char *name)
241 {
242 	struct char_device_struct *cd;
243 	cd = __register_chrdev_region(0, baseminor, count, name);
244 	if (IS_ERR(cd))
245 		return PTR_ERR(cd);
246 	*dev = MKDEV(cd->major, cd->baseminor);
247 	return 0;
248 }
249 
250 /**
251  * register_chrdev() - Register a major number for character devices.
252  * @major: major device number or 0 for dynamic allocation
253  * @name: name of this range of devices
254  * @fops: file operations associated with this devices
255  *
256  * If @major == 0 this functions will dynamically allocate a major and return
257  * its number.
258  *
259  * If @major > 0 this function will attempt to reserve a device with the given
260  * major number and will return zero on success.
261  *
262  * Returns a -ve errno on failure.
263  *
264  * The name of this device has nothing to do with the name of the device in
265  * /dev. It only helps to keep track of the different owners of devices. If
266  * your module name has only one type of devices it's ok to use e.g. the name
267  * of the module here.
268  *
269  * This function registers a range of 256 minor numbers. The first minor number
270  * is 0.
271  */
272 int register_chrdev(unsigned int major, const char *name,
273 		    const struct file_operations *fops)
274 {
275 	struct char_device_struct *cd;
276 	struct cdev *cdev;
277 	char *s;
278 	int err = -ENOMEM;
279 
280 	cd = __register_chrdev_region(major, 0, 256, name);
281 	if (IS_ERR(cd))
282 		return PTR_ERR(cd);
283 
284 	cdev = cdev_alloc();
285 	if (!cdev)
286 		goto out2;
287 
288 	cdev->owner = fops->owner;
289 	cdev->ops = fops;
290 	kobject_set_name(&cdev->kobj, "%s", name);
291 	for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
292 		*s = '!';
293 
294 	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
295 	if (err)
296 		goto out;
297 
298 	cd->cdev = cdev;
299 
300 	return major ? 0 : cd->major;
301 out:
302 	kobject_put(&cdev->kobj);
303 out2:
304 	kfree(__unregister_chrdev_region(cd->major, 0, 256));
305 	return err;
306 }
307 
308 /**
309  * unregister_chrdev_region() - return a range of device numbers
310  * @from: the first in the range of numbers to unregister
311  * @count: the number of device numbers to unregister
312  *
313  * This function will unregister a range of @count device numbers,
314  * starting with @from.  The caller should normally be the one who
315  * allocated those numbers in the first place...
316  */
317 void unregister_chrdev_region(dev_t from, unsigned count)
318 {
319 	dev_t to = from + count;
320 	dev_t n, next;
321 
322 	for (n = from; n < to; n = next) {
323 		next = MKDEV(MAJOR(n)+1, 0);
324 		if (next > to)
325 			next = to;
326 		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
327 	}
328 }
329 
330 int unregister_chrdev(unsigned int major, const char *name)
331 {
332 	struct char_device_struct *cd;
333 	cd = __unregister_chrdev_region(major, 0, 256);
334 	if (cd && cd->cdev)
335 		cdev_del(cd->cdev);
336 	kfree(cd);
337 	return 0;
338 }
339 
/* protects inode->i_cdev and each cdev's list of referencing inodes */
static DEFINE_SPINLOCK(cdev_lock);
341 
342 static struct kobject *cdev_get(struct cdev *p)
343 {
344 	struct module *owner = p->owner;
345 	struct kobject *kobj;
346 
347 	if (owner && !try_module_get(owner))
348 		return NULL;
349 	kobj = kobject_get(&p->kobj);
350 	if (!kobj)
351 		module_put(owner);
352 	return kobj;
353 }
354 
355 void cdev_put(struct cdev *p)
356 {
357 	if (p) {
358 		struct module *owner = p->owner;
359 		kobject_put(&p->kobj);
360 		module_put(owner);
361 	}
362 }
363 
/*
 * Called every time a character special file is opened.
 *
 * Resolves inode->i_rdev to a cdev (caching the result in inode->i_cdev),
 * takes a reference on it, and delegates to the driver's own open().
 * Returns 0 or a negative errno.
 */
int chrdev_open(struct inode * inode, struct file * filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		/* no cached cdev: look it up by device number */
		struct kobject *kobj;
		int idx;
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* re-check: another opener may have cached a cdev while the
		 * lock was dropped for the lookup */
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			inode->i_cindex = idx;
			list_add(&inode->i_devices, &p->list);
			new = NULL;	/* the lookup ref now belongs to the inode */
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);	/* drop the lookup ref if we lost the race (no-op on NULL) */
	if (ret)
		return ret;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op) {
		cdev_put(p);
		return -ENXIO;
	}
	if (filp->f_op->open) {
		/* BKL held around the driver's open, as in the original code */
		lock_kernel();
		ret = filp->f_op->open(inode,filp);
		unlock_kernel();
	}
	if (ret)
		cdev_put(p);	/* driver refused the open: drop our reference */
	return ret;
}
412 
/*
 * Detach @inode from the cdev it was bound to in chrdev_open(): unhook
 * it from the cdev's inode list and clear the cached i_cdev pointer.
 */
void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	spin_unlock(&cdev_lock);
}
420 
/*
 * Detach every inode that still references @cdev (bound in chrdev_open()),
 * so the cdev can go away without leaving stale inode->i_cdev pointers.
 * Called from the kobject release handlers below.
 */
static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}
432 
/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 * (chrdev_open() replaces filp->f_op with the driver's own fops.)
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
};
441 
442 static struct kobject *exact_match(dev_t dev, int *part, void *data)
443 {
444 	struct cdev *p = data;
445 	return &p->kobj;
446 }
447 
448 static int exact_lock(dev_t dev, void *data)
449 {
450 	struct cdev *p = data;
451 	return cdev_get(p) ? 0 : -1;
452 }
453 
/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	p->dev = dev;
	p->count = count;
	/* NOTE(review): p->dev/p->count remain set even if kobj_map() fails */
	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}
470 
/* Remove the [dev, dev+count) mapping installed by cdev_add(). */
static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}
475 
/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	/* final kobject_put() ends up in the ktype release handler, which
	 * purges referencing inodes and, for cdev_alloc()ed cdevs, frees @p */
	kobject_put(&p->kobj);
}
488 
489 
490 static void cdev_default_release(struct kobject *kobj)
491 {
492 	struct cdev *p = container_of(kobj, struct cdev, kobj);
493 	cdev_purge(p);
494 }
495 
496 static void cdev_dynamic_release(struct kobject *kobj)
497 {
498 	struct cdev *p = container_of(kobj, struct cdev, kobj);
499 	cdev_purge(p);
500 	kfree(p);
501 }
502 
/* kobj_type for cdevs embedded in caller-owned storage (cdev_init()) */
static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

/* kobj_type for cdevs from cdev_alloc(); its release also kfree()s them */
static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};
510 
511 /**
512  * cdev_alloc() - allocate a cdev structure
513  *
514  * Allocates and returns a cdev structure, or NULL on failure.
515  */
516 struct cdev *cdev_alloc(void)
517 {
518 	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
519 	if (p) {
520 		p->kobj.ktype = &ktype_cdev_dynamic;
521 		INIT_LIST_HEAD(&p->list);
522 		kobject_init(&p->kobj);
523 	}
524 	return p;
525 }
526 
/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().  Unlike with cdev_alloc(), the storage is
 * owned by the caller and is not freed on release (the default ktype's
 * release handler only purges referencing inodes).
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	/* clear everything first; field setup below relies on a zeroed struct */
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	cdev->kobj.ktype = &ktype_cdev_default;
	kobject_init(&cdev->kobj);
	cdev->ops = fops;
}
543 
/*
 * Fallback probe for cdev_map: no driver is registered for @dev, so try
 * to load one by its "char-major-%d-%d" alias.  Always returns NULL —
 * presumably the map layer retries the lookup after the probe; confirm
 * against kobj_map().
 */
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	/* NOTE(review): a positive request_module() return is treated as a
	 * failed load here, triggering the legacy-alias retry below */
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}
551 
/*
 * Boot-time setup: create the dev_t -> cdev map, with base_probe() as
 * the fallback for device numbers nobody has registered yet.
 */
void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}
556 
557 
558 /* Let modules do char dev stuff */
559 EXPORT_SYMBOL(register_chrdev_region);
560 EXPORT_SYMBOL(unregister_chrdev_region);
561 EXPORT_SYMBOL(alloc_chrdev_region);
562 EXPORT_SYMBOL(cdev_init);
563 EXPORT_SYMBOL(cdev_alloc);
564 EXPORT_SYMBOL(cdev_del);
565 EXPORT_SYMBOL(cdev_add);
566 EXPORT_SYMBOL(register_chrdev);
567 EXPORT_SYMBOL(unregister_chrdev);
568 EXPORT_SYMBOL(directly_mappable_cdev_bdi);
569