xref: /linux/fs/char_dev.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
1 /*
2  *  linux/fs/char_dev.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6 
7 #include <linux/init.h>
8 #include <linux/fs.h>
9 #include <linux/kdev_t.h>
10 #include <linux/slab.h>
11 #include <linux/string.h>
12 
13 #include <linux/major.h>
14 #include <linux/errno.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/seq_file.h>
18 
19 #include <linux/kobject.h>
20 #include <linux/kobj_map.h>
21 #include <linux/cdev.h>
22 #include <linux/mutex.h>
23 #include <linux/backing-dev.h>
24 
25 #ifdef CONFIG_KMOD
26 #include <linux/kmod.h>
27 #endif
28 #include "internal.h"
29 
30 /*
31  * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
32  * devices
33  * - permits shared-mmap for read, write and/or exec
34  * - does not permit private mmap in NOMMU mode (can't do COW)
35  * - no readahead or I/O queue unplugging required
36  */
37 struct backing_dev_info directly_mappable_cdev_bdi = {
38 	.capabilities	= (
39 #ifdef CONFIG_MMU
40 		/* permit private copies of the data to be taken */
41 		BDI_CAP_MAP_COPY |
42 #endif
43 		/* permit direct mmap, for read, write or exec */
44 		BDI_CAP_MAP_DIRECT |
45 		BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
46 };
47 
48 static struct kobj_map *cdev_map;
49 
50 static DEFINE_MUTEX(chrdevs_lock);
51 
52 static struct char_device_struct {
53 	struct char_device_struct *next;
54 	unsigned int major;
55 	unsigned int baseminor;
56 	int minorct;
57 	char name[64];
58 	struct file_operations *fops;
59 	struct cdev *cdev;		/* will die */
60 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
61 
62 /* index in the above */
63 static inline int major_to_index(int major)
64 {
65 	return major % CHRDEV_MAJOR_HASH_SIZE;
66 }
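
/*
 * Illustration (hedged, assuming CHRDEV_MAJOR_HASH_SIZE is 255, its usual
 * value in <linux/fs.h> for this era): a major above the hash size, say 300,
 * hashes to slot 300 % 255 == 45 and therefore shares a hash chain with
 * major 45.  That is why the table is a hash, not a direct index.
 */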
67 
68 #ifdef CONFIG_PROC_FS
69 
70 void chrdev_show(struct seq_file *f, off_t offset)
71 {
72 	struct char_device_struct *cd;
73 
74 	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
75 		mutex_lock(&chrdevs_lock);
76 		for (cd = chrdevs[offset]; cd; cd = cd->next)
77 			seq_printf(f, "%3d %s\n", cd->major, cd->name);
78 		mutex_unlock(&chrdevs_lock);
79 	}
80 }
81 
82 #endif /* CONFIG_PROC_FS */
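
/*
 * For reference (illustrative, names depend on which drivers registered):
 * the "%3d %s" lines emitted above make up the "Character devices:" section
 * of /proc/devices, e.g.
 *
 *	  1 mem
 *	 10 misc
 *	 89 i2c
 */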
83 
84 /*
85  * Register a single major with a specified minor range.
86  *
87  * If major == 0 this function will dynamically allocate an unused major
88  * and register the range under it.
89  *
90  * If major > 0 this function will attempt to reserve the passed range of
91  * minors under that major.
92  *
93  * Returns the char_device_struct for the range, or ERR_PTR(-errno) on failure.
94  */
95 static struct char_device_struct *
96 __register_chrdev_region(unsigned int major, unsigned int baseminor,
97 			   int minorct, const char *name)
98 {
99 	struct char_device_struct *cd, **cp;
100 	int ret = 0;
101 	int i;
102 
103 	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
104 	if (cd == NULL)
105 		return ERR_PTR(-ENOMEM);
106 
107 	mutex_lock(&chrdevs_lock);
108 
109 	/* temporary */
110 	if (major == 0) {
111 		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
112 			if (is_lanana_major(i))
113 				continue;
114 			if (chrdevs[i] == NULL)
115 				break;
116 		}
117 
118 		if (i == 0) {
119 			ret = -EBUSY;
120 			goto out;
121 		}
122 		major = i;
123 		ret = major;
124 	}
125 
126 	cd->major = major;
127 	cd->baseminor = baseminor;
128 	cd->minorct = minorct;
129 	strlcpy(cd->name, name, sizeof(cd->name));
130 
131 	i = major_to_index(major);
132 
133 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
134 		if ((*cp)->major > major ||
135 		    ((*cp)->major == major &&
136 		     (((*cp)->baseminor >= baseminor) ||
137 		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
138 			break;
139 
140 	/* Check for overlapping minor ranges.  */
141 	if (*cp && (*cp)->major == major) {
142 		int old_min = (*cp)->baseminor;
143 		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
144 		int new_min = baseminor;
145 		int new_max = baseminor + minorct - 1;
146 
147 		/* New driver overlaps from the left.  */
148 		if (new_max >= old_min && new_max <= old_max) {
149 			ret = -EBUSY;
150 			goto out;
151 		}
152 
153 		/* New driver overlaps from the right.  */
154 		if (new_min <= old_max && new_min >= old_min) {
155 			ret = -EBUSY;
156 			goto out;
157 		}
158 	}
159 
160 	cd->next = *cp;
161 	*cp = cd;
162 	mutex_unlock(&chrdevs_lock);
163 	return cd;
164 out:
165 	mutex_unlock(&chrdevs_lock);
166 	kfree(cd);
167 	return ERR_PTR(ret);
168 }
169 
170 static struct char_device_struct *
171 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
172 {
173 	struct char_device_struct *cd = NULL, **cp;
174 	int i = major_to_index(major);
175 
176 	mutex_lock(&chrdevs_lock);
177 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
178 		if ((*cp)->major == major &&
179 		    (*cp)->baseminor == baseminor &&
180 		    (*cp)->minorct == minorct)
181 			break;
182 	if (*cp) {
183 		cd = *cp;
184 		*cp = cd->next;
185 	}
186 	mutex_unlock(&chrdevs_lock);
187 	return cd;
188 }
189 
190 /**
191  * register_chrdev_region() - register a range of device numbers
192  * @from: the first in the desired range of device numbers; must include
193  *        the major number.
194  * @count: the number of consecutive device numbers required
195  * @name: the name of the device or driver.
196  *
197  * Return value is zero on success, a negative error code on failure.
198  */
199 int register_chrdev_region(dev_t from, unsigned count, const char *name)
200 {
201 	struct char_device_struct *cd;
202 	dev_t to = from + count;
203 	dev_t n, next;
204 
205 	for (n = from; n < to; n = next) {
206 		next = MKDEV(MAJOR(n)+1, 0);
207 		if (next > to)
208 			next = to;
209 		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
210 			       next - n, name);
211 		if (IS_ERR(cd))
212 			goto fail;
213 	}
214 	return 0;
215 fail:
216 	to = n;
217 	for (n = from; n < to; n = next) {
218 		next = MKDEV(MAJOR(n)+1, 0);
219 		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
220 	}
221 	return PTR_ERR(cd);
222 }
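
/*
 * Illustrative sketch (hypothetical driver): a driver that owns a fixed
 * major reserves its minors up front.  The major 240 and the name "foo"
 * are made up; 240-254 are set aside for local/experimental use in
 * Documentation/devices.txt.
 *
 *	#define FOO_MAJOR	240
 *	#define FOO_MINORS	16
 *
 *	err = register_chrdev_region(MKDEV(FOO_MAJOR, 0), FOO_MINORS, "foo");
 *	if (err)
 *		return err;
 *	...
 *	unregister_chrdev_region(MKDEV(FOO_MAJOR, 0), FOO_MINORS);
 */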
223 
224 /**
225  * alloc_chrdev_region() - register a range of char device numbers
226  * @dev: output parameter for first assigned number
227  * @baseminor: first of the requested range of minor numbers
228  * @count: the number of minor numbers required
229  * @name: the name of the associated device or driver
230  *
231  * Allocates a range of char device numbers.  The major number will be
232  * chosen dynamically, and returned (along with the first minor number)
233  * in @dev.  Returns zero or a negative error code.
234  */
235 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
236 			const char *name)
237 {
238 	struct char_device_struct *cd;
239 	cd = __register_chrdev_region(0, baseminor, count, name);
240 	if (IS_ERR(cd))
241 		return PTR_ERR(cd);
242 	*dev = MKDEV(cd->major, cd->baseminor);
243 	return 0;
244 }
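
/*
 * Illustrative sketch (hypothetical driver): typical use from a driver's
 * init path when no fixed major is assigned.  "foo" and foo_devt are made
 * up for the example.
 *
 *	static dev_t foo_devt;
 *
 *	err = alloc_chrdev_region(&foo_devt, 0, 4, "foo");
 *	if (err)
 *		return err;
 *	printk(KERN_INFO "foo: using major %d\n", MAJOR(foo_devt));
 *	...
 *	unregister_chrdev_region(foo_devt, 4);
 */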
245 
246 /**
247  * register_chrdev() - Register a major number for character devices.
248  * @major: major device number or 0 for dynamic allocation
249  * @name: name of this range of devices
250  * @fops: file operations associated with these devices
251  *
252  * If @major == 0 this function will dynamically allocate a major and return
253  * its number.
254  *
255  * If @major > 0 this function will attempt to reserve a device with the given
256  * major number and will return zero on success.
257  *
258  * Returns a -ve errno on failure.
259  *
260  * The name of this range of devices has nothing to do with the name of the
261  * device node in /dev. It only helps to keep track of the different owners
262  * of devices. If your module provides only one type of device it is fine to
263  * use e.g. the name of the module here.
264  *
265  * This function registers a range of 256 minor numbers. The first minor number
266  * is 0.
267  */
268 int register_chrdev(unsigned int major, const char *name,
269 		    const struct file_operations *fops)
270 {
271 	struct char_device_struct *cd;
272 	struct cdev *cdev;
273 	char *s;
274 	int err = -ENOMEM;
275 
276 	cd = __register_chrdev_region(major, 0, 256, name);
277 	if (IS_ERR(cd))
278 		return PTR_ERR(cd);
279 
280 	cdev = cdev_alloc();
281 	if (!cdev)
282 		goto out2;
283 
284 	cdev->owner = fops->owner;
285 	cdev->ops = fops;
286 	kobject_set_name(&cdev->kobj, "%s", name);
287 	for (s = strchr(kobject_name(&cdev->kobj), '/'); s; s = strchr(s, '/'))
288 		*s = '!';
289 
290 	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
291 	if (err)
292 		goto out;
293 
294 	cd->cdev = cdev;
295 
296 	return major ? 0 : cd->major;
297 out:
298 	kobject_put(&cdev->kobj);
299 out2:
300 	kfree(__unregister_chrdev_region(cd->major, 0, 256));
301 	return err;
302 }
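
/*
 * Illustrative sketch (hypothetical driver): the older-style interface
 * provided by register_chrdev(), which reserves minors 0-255 under the
 * major.  With major == 0 the allocated major is the return value.
 *
 *	static int foo_major;
 *
 *	static int __init foo_init(void)
 *	{
 *		int ret = register_chrdev(0, "foo", &foo_fops);
 *		if (ret < 0)
 *			return ret;
 *		foo_major = ret;
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_chrdev(foo_major, "foo");
 *	}
 */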
303 
304 /**
305  * unregister_chrdev_region() - return a range of device numbers
306  * @from: the first in the range of numbers to unregister
307  * @count: the number of device numbers to unregister
308  *
309  * This function will unregister a range of @count device numbers,
310  * starting with @from.  The caller should normally be the one who
311  * allocated those numbers in the first place...
312  */
313 void unregister_chrdev_region(dev_t from, unsigned count)
314 {
315 	dev_t to = from + count;
316 	dev_t n, next;
317 
318 	for (n = from; n < to; n = next) {
319 		next = MKDEV(MAJOR(n)+1, 0);
320 		if (next > to)
321 			next = to;
322 		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
323 	}
324 }
325 
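/**
 * unregister_chrdev() - unregister a major registered with register_chrdev()
 * @major: the major number to release
 * @name: the name passed at registration time (currently unused here)
 *
 * Releases the 0-255 minor range taken by register_chrdev() and deletes the
 * cdev that was set up for it.  Always returns 0.
 */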
326 int unregister_chrdev(unsigned int major, const char *name)
327 {
328 	struct char_device_struct *cd;
329 	cd = __unregister_chrdev_region(major, 0, 256);
330 	if (cd && cd->cdev)
331 		cdev_del(cd->cdev);
332 	kfree(cd);
333 	return 0;
334 }
335 
336 static DEFINE_SPINLOCK(cdev_lock);
337 
338 static struct kobject *cdev_get(struct cdev *p)
339 {
340 	struct module *owner = p->owner;
341 	struct kobject *kobj;
342 
343 	if (owner && !try_module_get(owner))
344 		return NULL;
345 	kobj = kobject_get(&p->kobj);
346 	if (!kobj)
347 		module_put(owner);
348 	return kobj;
349 }
350 
351 void cdev_put(struct cdev *p)
352 {
353 	if (p) {
354 		struct module *owner = p->owner;
355 		kobject_put(&p->kobj);
356 		module_put(owner);
357 	}
358 }
359 
360 /*
361  * Called every time a character special file is opened
362  */
363 int chrdev_open(struct inode * inode, struct file * filp)
364 {
365 	struct cdev *p;
366 	struct cdev *new = NULL;
367 	int ret = 0;
368 
369 	spin_lock(&cdev_lock);
370 	p = inode->i_cdev;
371 	if (!p) {
372 		struct kobject *kobj;
373 		int idx;
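		/*
		 * Drop the spinlock before looking up the cdev: kobj_lookup()
		 * takes a mutex and may even load a module via base_probe(),
		 * so it cannot be called with cdev_lock held.
		 */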
374 		spin_unlock(&cdev_lock);
375 		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
376 		if (!kobj)
377 			return -ENXIO;
378 		new = container_of(kobj, struct cdev, kobj);
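		/*
		 * Re-check i_cdev under the lock: another opener may have
		 * installed a cdev for this inode while the lock was dropped,
		 * in which case the reference taken on "new" is dropped below.
		 */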
379 		spin_lock(&cdev_lock);
380 		p = inode->i_cdev;
381 		if (!p) {
382 			inode->i_cdev = p = new;
383 			inode->i_cindex = idx;
384 			list_add(&inode->i_devices, &p->list);
385 			new = NULL;
386 		} else if (!cdev_get(p))
387 			ret = -ENXIO;
388 	} else if (!cdev_get(p))
389 		ret = -ENXIO;
390 	spin_unlock(&cdev_lock);
391 	cdev_put(new);
392 	if (ret)
393 		return ret;
394 	filp->f_op = fops_get(p->ops);
395 	if (!filp->f_op) {
396 		cdev_put(p);
397 		return -ENXIO;
398 	}
399 	if (filp->f_op->open) {
400 		lock_kernel();
401 		ret = filp->f_op->open(inode, filp);
402 		unlock_kernel();
403 	}
404 	if (ret)
405 		cdev_put(p);
406 	return ret;
407 }
408 
409 void cd_forget(struct inode *inode)
410 {
411 	spin_lock(&cdev_lock);
412 	list_del_init(&inode->i_devices);
413 	inode->i_cdev = NULL;
414 	spin_unlock(&cdev_lock);
415 }
416 
417 static void cdev_purge(struct cdev *cdev)
418 {
419 	spin_lock(&cdev_lock);
420 	while (!list_empty(&cdev->list)) {
421 		struct inode *inode;
422 		inode = container_of(cdev->list.next, struct inode, i_devices);
423 		list_del_init(&inode->i_devices);
424 		inode->i_cdev = NULL;
425 	}
426 	spin_unlock(&cdev_lock);
427 }
428 
429 /*
430  * Dummy default file-operations: the only thing this does
431  * is contain the open that then fills in the correct operations
432  * depending on the special file...
433  */
434 const struct file_operations def_chr_fops = {
435 	.open = chrdev_open,
436 };
437 
438 static struct kobject *exact_match(dev_t dev, int *part, void *data)
439 {
440 	struct cdev *p = data;
441 	return &p->kobj;
442 }
443 
444 static int exact_lock(dev_t dev, void *data)
445 {
446 	struct cdev *p = data;
447 	return cdev_get(p) ? 0 : -1;
448 }
449 
450 /**
451  * cdev_add() - add a char device to the system
452  * @p: the cdev structure for the device
453  * @dev: the first device number for which this device is responsible
454  * @count: the number of consecutive minor numbers corresponding to this
455  *         device
456  *
457  * cdev_add() adds the device represented by @p to the system, making it
458  * live immediately.  A negative error code is returned on failure.
459  */
460 int cdev_add(struct cdev *p, dev_t dev, unsigned count)
461 {
462 	p->dev = dev;
463 	p->count = count;
464 	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
465 }
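
/*
 * Illustrative sketch (hypothetical driver): the usual pattern is to embed
 * a struct cdev in a per-device structure, initialize it with cdev_init()
 * (defined below) and only then make it live with cdev_add().  "foo_dev",
 * "foo_fops" and "devt" are made up for the example.
 *
 *	struct foo_dev {
 *		struct cdev cdev;
 *	};
 *
 *	cdev_init(&foo->cdev, &foo_fops);
 *	foo->cdev.owner = THIS_MODULE;
 *	err = cdev_add(&foo->cdev, devt, 1);
 *	if (err)
 *		goto fail;
 *	...
 *	cdev_del(&foo->cdev);	(on teardown)
 */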
466 
467 static void cdev_unmap(dev_t dev, unsigned count)
468 {
469 	kobj_unmap(cdev_map, dev, count);
470 }
471 
472 /**
473  * cdev_del() - remove a cdev from the system
474  * @p: the cdev structure to be removed
475  *
476  * cdev_del() removes @p from the system, possibly freeing the structure
477  * itself.
478  */
479 void cdev_del(struct cdev *p)
480 {
481 	cdev_unmap(p->dev, p->count);
482 	kobject_put(&p->kobj);
483 }
484 
485 
486 static void cdev_default_release(struct kobject *kobj)
487 {
488 	struct cdev *p = container_of(kobj, struct cdev, kobj);
489 	cdev_purge(p);
490 }
491 
492 static void cdev_dynamic_release(struct kobject *kobj)
493 {
494 	struct cdev *p = container_of(kobj, struct cdev, kobj);
495 	cdev_purge(p);
496 	kfree(p);
497 }
498 
499 static struct kobj_type ktype_cdev_default = {
500 	.release	= cdev_default_release,
501 };
502 
503 static struct kobj_type ktype_cdev_dynamic = {
504 	.release	= cdev_dynamic_release,
505 };
506 
507 /**
508  * cdev_alloc() - allocate a cdev structure
509  *
510  * Allocates and returns a cdev structure, or NULL on failure.
511  */
512 struct cdev *cdev_alloc(void)
513 {
514 	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
515 	if (p) {
516 		p->kobj.ktype = &ktype_cdev_dynamic;
517 		INIT_LIST_HEAD(&p->list);
518 		kobject_init(&p->kobj);
519 	}
520 	return p;
521 }
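
/*
 * Illustrative sketch (hypothetical driver): the dynamically allocated
 * variant, as used by register_chrdev() above.  A cdev obtained this way
 * is freed by its kobject release once the last reference is gone, so no
 * explicit kfree() is needed after cdev_del().
 *
 *	struct cdev *cdev = cdev_alloc();
 *	if (!cdev)
 *		return -ENOMEM;
 *	cdev->owner = THIS_MODULE;
 *	cdev->ops = &foo_fops;
 *	err = cdev_add(cdev, devt, 1);
 */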
522 
523 /**
524  * cdev_init() - initialize a cdev structure
525  * @cdev: the structure to initialize
526  * @fops: the file_operations for this device
527  *
528  * Initializes @cdev, remembering @fops, making it ready to add to the
529  * system with cdev_add().
530  */
531 void cdev_init(struct cdev *cdev, const struct file_operations *fops)
532 {
533 	memset(cdev, 0, sizeof *cdev);
534 	INIT_LIST_HEAD(&cdev->list);
535 	cdev->kobj.ktype = &ktype_cdev_default;
536 	kobject_init(&cdev->kobj);
537 	cdev->ops = fops;
538 }
539 
540 static struct kobject *base_probe(dev_t dev, int *part, void *data)
541 {
542 	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
543 		/* Make old-style 2.4 aliases work */
544 		request_module("char-major-%d", MAJOR(dev));
545 	return NULL;
546 }
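
/*
 * Modular drivers typically advertise themselves for this autoload path
 * with MODULE_ALIAS_CHARDEV(major, minor) or MODULE_ALIAS_CHARDEV_MAJOR(major),
 * which generate "char-major-..." module aliases matching the strings
 * requested above.
 */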
547 
548 void __init chrdev_init(void)
549 {
550 	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
551 }
552 
553 
554 /* Let modules do char dev stuff */
555 EXPORT_SYMBOL(register_chrdev_region);
556 EXPORT_SYMBOL(unregister_chrdev_region);
557 EXPORT_SYMBOL(alloc_chrdev_region);
558 EXPORT_SYMBOL(cdev_init);
559 EXPORT_SYMBOL(cdev_alloc);
560 EXPORT_SYMBOL(cdev_del);
561 EXPORT_SYMBOL(cdev_add);
562 EXPORT_SYMBOL(register_chrdev);
563 EXPORT_SYMBOL(unregister_chrdev);
564 EXPORT_SYMBOL(directly_mappable_cdev_bdi);
565