xref: /linux/fs/char_dev.c (revision 1795cf48b322b4d19230a40dbe7181acedd34a94)
1 /*
2  *  linux/fs/char_dev.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6 
7 #include <linux/init.h>
8 #include <linux/fs.h>
9 #include <linux/kdev_t.h>
10 #include <linux/slab.h>
11 #include <linux/string.h>
12 
13 #include <linux/major.h>
14 #include <linux/errno.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/seq_file.h>
18 
19 #include <linux/kobject.h>
20 #include <linux/kobj_map.h>
21 #include <linux/cdev.h>
22 #include <linux/mutex.h>
23 #include <linux/backing-dev.h>
24 
25 #ifdef CONFIG_KMOD
26 #include <linux/kmod.h>
27 #endif
28 #include "internal.h"
29 
30 /*
31  * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
32  * devices
33  * - permits shared-mmap for read, write and/or exec
34  * - does not permit private mmap in NOMMU mode (can't do COW)
35  * - no readahead or I/O queue unplugging required
36  */
37 struct backing_dev_info directly_mappable_cdev_bdi = {
38 	.capabilities	= (
39 #ifdef CONFIG_MMU
40 		/* permit private copies of the data to be taken */
41 		BDI_CAP_MAP_COPY |
42 #endif
43 		/* permit direct mmap, for read, write or exec */
44 		BDI_CAP_MAP_DIRECT |
45 		BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
46 };
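/*
 * Illustrative sketch (not part of this file): a driver for such a directly
 * mappable character device would typically point the file's mapping at the
 * BDI above from its open() method; "foo_open" is a hypothetical example:
 *
 *	static int foo_open(struct inode *inode, struct file *file)
 *	{
 *		file->f_mapping->backing_dev_info =
 *					&directly_mappable_cdev_bdi;
 *		return 0;
 *	}
 */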
47 
48 static struct kobj_map *cdev_map;
49 
50 static DEFINE_MUTEX(chrdevs_lock);
51 
52 static struct char_device_struct {
53 	struct char_device_struct *next;
54 	unsigned int major;
55 	unsigned int baseminor;
56 	int minorct;
57 	char name[64];
58 	struct cdev *cdev;		/* will die */
59 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
60 
61 /* index in the above */
62 static inline int major_to_index(int major)
63 {
64 	return major % CHRDEV_MAJOR_HASH_SIZE;
65 }
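/*
 * Worked example (illustrative): with CHRDEV_MAJOR_HASH_SIZE == 255, major
 * 254 maps to slot 254, while an extended major such as 300 maps to slot
 * 300 % 255 == 45 and shares that slot's chain.
 */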
66 
67 #ifdef CONFIG_PROC_FS
68 
69 void chrdev_show(struct seq_file *f, off_t offset)
70 {
71 	struct char_device_struct *cd;
72 
73 	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
74 		mutex_lock(&chrdevs_lock);
75 		for (cd = chrdevs[offset]; cd; cd = cd->next)
76 			seq_printf(f, "%3d %s\n", cd->major, cd->name);
77 		mutex_unlock(&chrdevs_lock);
78 	}
79 }
80 
81 #endif /* CONFIG_PROC_FS */
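/*
 * chrdev_show() backs the "Character devices:" half of /proc/devices; the
 * output is one "%3d %s" line per registered major, for example
 * (illustrative listing only):
 *
 *	  1 mem
 *	  4 tty
 *	  5 /dev/tty
 *	 10 misc
 */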
82 
83 /*
84  * Register a single major with a specified minor range.
85  *
86  * If major == 0 this function will dynamically allocate an unused major;
87  * the chosen number is recorded in the returned structure.
88  *
89  * If major > 0 this function will attempt to reserve the passed range of
90  * minors under that major.
91  *
92  * Returns the new char_device_struct, or ERR_PTR(-ve errno) on failure.
93  */
94 static struct char_device_struct *
95 __register_chrdev_region(unsigned int major, unsigned int baseminor,
96 			   int minorct, const char *name)
97 {
98 	struct char_device_struct *cd, **cp;
99 	int ret = 0;
100 	int i;
101 
102 	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
103 	if (cd == NULL)
104 		return ERR_PTR(-ENOMEM);
105 
106 	mutex_lock(&chrdevs_lock);
107 
108 	/* temporary: pick a free major from the top of the table */
109 	if (major == 0) {
110 		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
111 			if (chrdevs[i] == NULL)
112 				break;
113 		}
114 
115 		if (i == 0) {
116 			ret = -EBUSY;
117 			goto out;
118 		}
119 		major = i;
120 		ret = major;
121 	}
122 
123 	cd->major = major;
124 	cd->baseminor = baseminor;
125 	cd->minorct = minorct;
126 	strlcpy(cd->name, name, sizeof(cd->name));
127 
128 	i = major_to_index(major);
129 
130 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
131 		if ((*cp)->major > major ||
132 		    ((*cp)->major == major &&
133 		     (((*cp)->baseminor >= baseminor) ||
134 		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
135 			break;
136 
137 	/* Check for overlapping minor ranges.  */
138 	if (*cp && (*cp)->major == major) {
139 		int old_min = (*cp)->baseminor;
140 		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
141 		int new_min = baseminor;
142 		int new_max = baseminor + minorct - 1;
143 
144 		/* New driver overlaps from the left.  */
145 		if (new_max >= old_min && new_max <= old_max) {
146 			ret = -EBUSY;
147 			goto out;
148 		}
149 
150 		/* New driver overlaps from the right.  */
151 		if (new_min <= old_max && new_min >= old_min) {
152 			ret = -EBUSY;
153 			goto out;
154 		}
155 	}
156 
157 	cd->next = *cp;
158 	*cp = cd;
159 	mutex_unlock(&chrdevs_lock);
160 	return cd;
161 out:
162 	mutex_unlock(&chrdevs_lock);
163 	kfree(cd);
164 	return ERR_PTR(ret);
165 }
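/*
 * Worked example of the overlap check above (42 is just a placeholder major
 * from an otherwise free slot):
 *
 *	__register_chrdev_region(42, 0, 16, "foo");	reserves minors 0..15
 *	__register_chrdev_region(42, 8, 16, "bar");	returns ERR_PTR(-EBUSY)
 *
 * The second call's new_min (8) lies within the existing range 0..15, so it
 * is rejected by the "overlaps from the right" test.
 */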
166 
167 static struct char_device_struct *
168 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
169 {
170 	struct char_device_struct *cd = NULL, **cp;
171 	int i = major_to_index(major);
172 
173 	mutex_lock(&chrdevs_lock);
174 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
175 		if ((*cp)->major == major &&
176 		    (*cp)->baseminor == baseminor &&
177 		    (*cp)->minorct == minorct)
178 			break;
179 	if (*cp) {
180 		cd = *cp;
181 		*cp = cd->next;
182 	}
183 	mutex_unlock(&chrdevs_lock);
184 	return cd;
185 }
186 
187 /**
188  * register_chrdev_region() - register a range of device numbers
189  * @from: the first in the desired range of device numbers; must include
190  *        the major number.
191  * @count: the number of consecutive device numbers required
192  * @name: the name of the device or driver.
193  *
194  * Return value is zero on success, a negative error code on failure.
195  */
196 int register_chrdev_region(dev_t from, unsigned count, const char *name)
197 {
198 	struct char_device_struct *cd;
199 	dev_t to = from + count;
200 	dev_t n, next;
201 
202 	for (n = from; n < to; n = next) {
203 		next = MKDEV(MAJOR(n)+1, 0);
204 		if (next > to)
205 			next = to;
206 		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
207 			       next - n, name);
208 		if (IS_ERR(cd))
209 			goto fail;
210 	}
211 	return 0;
212 fail:
213 	to = n;
214 	for (n = from; n < to; n = next) {
215 		next = MKDEV(MAJOR(n)+1, 0);
216 		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
217 	}
218 	return PTR_ERR(cd);
219 }
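/*
 * Illustrative usage sketch (hypothetical driver and numbers): a driver that
 * owns a fixed major reserves its minors at init time and returns them on
 * exit:
 *
 *	err = register_chrdev_region(MKDEV(FOO_MAJOR, 0), FOO_MINORS, "foo");
 *	if (err)
 *		return err;
 *	...
 *	unregister_chrdev_region(MKDEV(FOO_MAJOR, 0), FOO_MINORS);
 */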
220 
221 /**
222  * alloc_chrdev_region() - register a range of char device numbers
223  * @dev: output parameter for first assigned number
224  * @baseminor: first of the requested range of minor numbers
225  * @count: the number of minor numbers required
226  * @name: the name of the associated device or driver
227  *
228  * Allocates a range of char device numbers.  The major number will be
229  * chosen dynamically, and returned (along with the first minor number)
230  * in @dev.  Returns zero or a negative error code.
231  */
232 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
233 			const char *name)
234 {
235 	struct char_device_struct *cd;
236 	cd = __register_chrdev_region(0, baseminor, count, name);
237 	if (IS_ERR(cd))
238 		return PTR_ERR(cd);
239 	*dev = MKDEV(cd->major, cd->baseminor);
240 	return 0;
241 }
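/*
 * Illustrative usage sketch (hypothetical names): new drivers normally let
 * the kernel choose the major:
 *
 *	dev_t devt;
 *	int err = alloc_chrdev_region(&devt, 0, FOO_MINORS, "foo");
 *	if (err)
 *		return err;
 *	foo_major = MAJOR(devt);	first minor is MINOR(devt), here 0
 *
 * The region is later released with unregister_chrdev_region(devt, FOO_MINORS).
 */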
242 
243 /**
244  * register_chrdev() - Register a major number for character devices.
245  * @major: major device number or 0 for dynamic allocation
246  * @name: name of this range of devices
247  * @fops: file operations associated with these devices
248  *
249  * If @major == 0 this function will dynamically allocate a major and return
250  * its number.
251  *
252  * If @major > 0 this function will attempt to reserve a device with the given
253  * major number and will return zero on success.
254  *
255  * Returns a -ve errno on failure.
256  *
257  * The name of this device has nothing to do with the name of the device in
258  * /dev. It only helps to keep track of the different owners of devices. If
259  * your module registers only one type of device it is fine to use e.g. the
260  * name of the module here.
261  *
262  * This function registers a range of 256 minor numbers. The first minor number
263  * is 0.
264  */
265 int register_chrdev(unsigned int major, const char *name,
266 		    const struct file_operations *fops)
267 {
268 	struct char_device_struct *cd;
269 	struct cdev *cdev;
270 	char *s;
271 	int err = -ENOMEM;
272 
273 	cd = __register_chrdev_region(major, 0, 256, name);
274 	if (IS_ERR(cd))
275 		return PTR_ERR(cd);
276 
277 	cdev = cdev_alloc();
278 	if (!cdev)
279 		goto out2;
280 
281 	cdev->owner = fops->owner;
282 	cdev->ops = fops;
283 	kobject_set_name(&cdev->kobj, "%s", name);
284 	for (s = strchr(kobject_name(&cdev->kobj), '/'); s; s = strchr(s, '/'))
285 		*s = '!';
286 
287 	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
288 	if (err)
289 		goto out;
290 
291 	cd->cdev = cdev;
292 
293 	return major ? 0 : cd->major;
294 out:
295 	kobject_put(&cdev->kobj);
296 out2:
297 	kfree(__unregister_chrdev_region(cd->major, 0, 256));
298 	return err;
299 }
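/*
 * Illustrative usage sketch (hypothetical driver): the old-style interface
 * above bundles region reservation and cdev setup for minors 0..255:
 *
 *	static const struct file_operations foo_fops = {
 *		.owner	= THIS_MODULE,
 *		.open	= foo_open,
 *		.read	= foo_read,
 *	};
 *
 *	foo_major = register_chrdev(0, "foo", &foo_fops);
 *	if (foo_major < 0)
 *		return foo_major;
 *	...
 *	unregister_chrdev(foo_major, "foo");
 *
 * With a dynamic major (first argument 0) the positive return value is the
 * allocated major; with a fixed major the return value is 0 on success.
 */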
300 
301 /**
302  * unregister_chrdev_region() - return a range of device numbers
303  * @from: the first in the range of numbers to unregister
304  * @count: the number of device numbers to unregister
305  *
306  * This function will unregister a range of @count device numbers,
307  * starting with @from.  The caller should normally be the one who
308  * allocated those numbers in the first place...
309  */
310 void unregister_chrdev_region(dev_t from, unsigned count)
311 {
312 	dev_t to = from + count;
313 	dev_t n, next;
314 
315 	for (n = from; n < to; n = next) {
316 		next = MKDEV(MAJOR(n)+1, 0);
317 		if (next > to)
318 			next = to;
319 		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
320 	}
321 }
322 
323 void unregister_chrdev(unsigned int major, const char *name)
324 {
325 	struct char_device_struct *cd;
326 	cd = __unregister_chrdev_region(major, 0, 256);
327 	if (cd && cd->cdev)
328 		cdev_del(cd->cdev);
329 	kfree(cd);
330 }
331 
332 static DEFINE_SPINLOCK(cdev_lock);
333 
334 static struct kobject *cdev_get(struct cdev *p)
335 {
336 	struct module *owner = p->owner;
337 	struct kobject *kobj;
338 
339 	if (owner && !try_module_get(owner))
340 		return NULL;
341 	kobj = kobject_get(&p->kobj);
342 	if (!kobj)
343 		module_put(owner);
344 	return kobj;
345 }
346 
347 void cdev_put(struct cdev *p)
348 {
349 	if (p) {
350 		struct module *owner = p->owner;
351 		kobject_put(&p->kobj);
352 		module_put(owner);
353 	}
354 }
355 
356 /*
357  * Called every time a character special file is opened
358  */
359 static int chrdev_open(struct inode *inode, struct file *filp)
360 {
361 	struct cdev *p;
362 	struct cdev *new = NULL;
363 	int ret = 0;
364 
365 	spin_lock(&cdev_lock);
366 	p = inode->i_cdev;
367 	if (!p) {
368 		struct kobject *kobj;
369 		int idx;
370 		spin_unlock(&cdev_lock);
371 		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
372 		if (!kobj)
373 			return -ENXIO;
374 		new = container_of(kobj, struct cdev, kobj);
375 		spin_lock(&cdev_lock);
376 		/* Check i_cdev again in case somebody beat us to it while
377 		   we dropped the lock. */
378 		p = inode->i_cdev;
379 		if (!p) {
380 			inode->i_cdev = p = new;
381 			inode->i_cindex = idx;
382 			list_add(&inode->i_devices, &p->list);
383 			new = NULL;
384 		} else if (!cdev_get(p))
385 			ret = -ENXIO;
386 	} else if (!cdev_get(p))
387 		ret = -ENXIO;
388 	spin_unlock(&cdev_lock);
389 	cdev_put(new);
390 	if (ret)
391 		return ret;
392 	filp->f_op = fops_get(p->ops);
393 	if (!filp->f_op) {
394 		cdev_put(p);
395 		return -ENXIO;
396 	}
397 	if (filp->f_op->open)
398 		ret = filp->f_op->open(inode, filp);
399 	if (ret)
400 		cdev_put(p);
401 	return ret;
402 }
403 
404 void cd_forget(struct inode *inode)
405 {
406 	spin_lock(&cdev_lock);
407 	list_del_init(&inode->i_devices);
408 	inode->i_cdev = NULL;
409 	spin_unlock(&cdev_lock);
410 }
411 
412 static void cdev_purge(struct cdev *cdev)
413 {
414 	spin_lock(&cdev_lock);
415 	while (!list_empty(&cdev->list)) {
416 		struct inode *inode;
417 		inode = container_of(cdev->list.next, struct inode, i_devices);
418 		list_del_init(&inode->i_devices);
419 		inode->i_cdev = NULL;
420 	}
421 	spin_unlock(&cdev_lock);
422 }
423 
424 /*
425  * Dummy default file-operations: the only thing this does
426  * is contain the open that then fills in the correct operations
427  * depending on the special file...
428  */
429 const struct file_operations def_chr_fops = {
430 	.open = chrdev_open,
431 };
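/*
 * Illustrative note: the VFS installs def_chr_fops when a character special
 * inode is set up; for S_ISCHR inodes init_special_inode() in fs/inode.c
 * does roughly:
 *
 *	inode->i_fop = &def_chr_fops;
 *	inode->i_rdev = rdev;
 *
 * so the first open() always enters chrdev_open() above, which then swaps
 * in the owning driver's real file_operations.
 */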
432 
433 static struct kobject *exact_match(dev_t dev, int *part, void *data)
434 {
435 	struct cdev *p = data;
436 	return &p->kobj;
437 }
438 
439 static int exact_lock(dev_t dev, void *data)
440 {
441 	struct cdev *p = data;
442 	return cdev_get(p) ? 0 : -1;
443 }
444 
445 /**
446  * cdev_add() - add a char device to the system
447  * @p: the cdev structure for the device
448  * @dev: the first device number for which this device is responsible
449  * @count: the number of consecutive minor numbers corresponding to this
450  *         device
451  *
452  * cdev_add() adds the device represented by @p to the system, making it
453  * live immediately.  A negative error code is returned on failure.
454  */
455 int cdev_add(struct cdev *p, dev_t dev, unsigned count)
456 {
457 	p->dev = dev;
458 	p->count = count;
459 	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
460 }
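/*
 * Note that userspace may open the device the moment cdev_add() returns, so
 * @p->ops, @p->owner and any driver-private state must be fully set up
 * beforehand; see the illustrative sketches after cdev_alloc() and
 * cdev_init() below.
 */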
461 
462 static void cdev_unmap(dev_t dev, unsigned count)
463 {
464 	kobj_unmap(cdev_map, dev, count);
465 }
466 
467 /**
468  * cdev_del() - remove a cdev from the system
469  * @p: the cdev structure to be removed
470  *
471  * cdev_del() removes @p from the system, possibly freeing the structure
472  * itself.
473  */
474 void cdev_del(struct cdev *p)
475 {
476 	cdev_unmap(p->dev, p->count);
477 	kobject_put(&p->kobj);
478 }
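/*
 * Illustrative teardown order for the sketches below (hypothetical names):
 *
 *	cdev_del(&foo->cdev);
 *	unregister_chrdev_region(devt, FOO_MINORS);
 *
 * cdev_del() only removes the kobj_map entry and drops a reference; a
 * cdev_alloc()'d structure is actually freed once the last reference to its
 * kobject goes away.
 */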
479 
480 
481 static void cdev_default_release(struct kobject *kobj)
482 {
483 	struct cdev *p = container_of(kobj, struct cdev, kobj);
484 	cdev_purge(p);
485 }
486 
487 static void cdev_dynamic_release(struct kobject *kobj)
488 {
489 	struct cdev *p = container_of(kobj, struct cdev, kobj);
490 	cdev_purge(p);
491 	kfree(p);
492 }
493 
494 static struct kobj_type ktype_cdev_default = {
495 	.release	= cdev_default_release,
496 };
497 
498 static struct kobj_type ktype_cdev_dynamic = {
499 	.release	= cdev_dynamic_release,
500 };
501 
502 /**
503  * cdev_alloc() - allocate a cdev structure
504  *
505  * Allocates and returns a cdev structure, or NULL on failure.
506  */
507 struct cdev *cdev_alloc(void)
508 {
509 	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
510 	if (p) {
511 		INIT_LIST_HEAD(&p->list);
512 		kobject_init(&p->kobj, &ktype_cdev_dynamic);
513 	}
514 	return p;
515 }
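/*
 * Illustrative usage sketch (hypothetical names): a standalone cdev allocated
 * here is freed via ktype_cdev_dynamic's release once the last reference is
 * dropped after cdev_del():
 *
 *	struct cdev *p = cdev_alloc();
 *	if (!p)
 *		return -ENOMEM;
 *	p->owner = THIS_MODULE;
 *	p->ops = &foo_fops;
 *	err = cdev_add(p, devt, 1);
 *	if (err)
 *		kobject_put(&p->kobj);
 *
 * This mirrors what register_chrdev() does earlier in this file.
 */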
516 
517 /**
518  * cdev_init() - initialize a cdev structure
519  * @cdev: the structure to initialize
520  * @fops: the file_operations for this device
521  *
522  * Initializes @cdev, remembering @fops, making it ready to add to the
523  * system with cdev_add().
524  */
525 void cdev_init(struct cdev *cdev, const struct file_operations *fops)
526 {
527 	memset(cdev, 0, sizeof *cdev);
528 	INIT_LIST_HEAD(&cdev->list);
529 	kobject_init(&cdev->kobj, &ktype_cdev_default);
530 	cdev->ops = fops;
531 }
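/*
 * Illustrative usage sketch (hypothetical names): the common pattern is to
 * embed the cdev in a driver-private structure and initialize everything
 * before the cdev_add() call makes it live:
 *
 *	struct foo_device {
 *		struct cdev cdev;
 *		...
 *	};
 *
 *	cdev_init(&foo->cdev, &foo_fops);
 *	foo->cdev.owner = THIS_MODULE;
 *	err = cdev_add(&foo->cdev, devt, 1);
 *	if (err)
 *		goto out_free;
 */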
532 
533 static struct kobject *base_probe(dev_t dev, int *part, void *data)
534 {
535 	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
536 		/* Make old-style 2.4 aliases work */
537 		request_module("char-major-%d", MAJOR(dev));
538 	return NULL;
539 }
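/*
 * Illustrative note: for the request_module() calls above to resolve, a
 * modular driver declares matching aliases, e.g. (hypothetical major):
 *
 *	MODULE_ALIAS_CHARDEV_MAJOR(FOO_MAJOR);
 *
 * which expands to a "char-major-<major>-*" alias that modprobe matches
 * when an unregistered device number is first opened.
 */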
540 
541 void __init chrdev_init(void)
542 {
543 	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
544 	bdi_init(&directly_mappable_cdev_bdi);
545 }
546 
547 
548 /* Let modules do char dev stuff */
549 EXPORT_SYMBOL(register_chrdev_region);
550 EXPORT_SYMBOL(unregister_chrdev_region);
551 EXPORT_SYMBOL(alloc_chrdev_region);
552 EXPORT_SYMBOL(cdev_init);
553 EXPORT_SYMBOL(cdev_alloc);
554 EXPORT_SYMBOL(cdev_del);
555 EXPORT_SYMBOL(cdev_add);
556 EXPORT_SYMBOL(register_chrdev);
557 EXPORT_SYMBOL(unregister_chrdev);
558 EXPORT_SYMBOL(directly_mappable_cdev_bdi);
559