xref: /linux/fs/char_dev.c (revision 5e8d780d745c1619aba81fe7166c5a4b5cad2b84)
/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct file_operations *fops;
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index in the above */
static inline int major_to_index(int major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}

#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
		mutex_lock(&chrdevs_lock);
		for (cd = chrdevs[offset]; cd; cd = cd->next)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
		mutex_unlock(&chrdevs_lock);
	}
}

#endif /* CONFIG_PROC_FS */

/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate an unused major
 * and use it for the new registration.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors under that major.
 *
 * Returns a pointer to the new char_device_struct on success, or an
 * ERR_PTR()-encoded negative errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			   int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	/* temporary */
	if (major == 0) {
		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
			if (chrdevs[i] == NULL)
				break;
		}

		if (i == 0) {
			ret = -EBUSY;
			goto out;
		}
		major = i;
		ret = major;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strlcpy(cd->name, name, sizeof(cd->name));	/* always NUL-terminate the name */

	i = major_to_index(major);

	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major && (*cp)->baseminor >= baseminor))
			break;
	if (*cp && (*cp)->major == major &&
	    (*cp)->baseminor < baseminor + minorct) {
		ret = -EBUSY;
		goto out;
	}
	cd->next = *cp;
	*cp = cd;
	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}

static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}

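/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver
 *
 * Return value is zero on success, a negative error code on failure.
 */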
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}

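/**
 * alloc_chrdev_region() - register a range of char device numbers
 * @dev: output parameter for the first assigned device number
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: the name of the associated device or driver
 *
 * Allocates a range of char device numbers.  The major number will be
 * chosen dynamically, and returned (along with the first minor) in @dev.
 * Returns zero or a negative error code.
 */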
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
			const char *name)
{
	struct char_device_struct *cd;
	cd = __register_chrdev_region(0, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	*dev = MKDEV(cd->major, cd->baseminor);
	return 0;
}

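/*
 * Example (illustrative sketch, not compiled): how a driver might reserve a
 * dynamically allocated device number range with alloc_chrdev_region() and
 * give it back on exit.  The names foo_devt, foo_init and foo_exit are
 * hypothetical.
 */
#if 0
static dev_t foo_devt;

static int __init foo_init(void)
{
	/* ask for 4 minors; the major is chosen by the kernel */
	int err = alloc_chrdev_region(&foo_devt, 0, 4, "foo");
	if (err)
		return err;
	printk(KERN_INFO "foo: got major %d\n", MAJOR(foo_devt));
	return 0;
}

static void __exit foo_exit(void)
{
	unregister_chrdev_region(foo_devt, 4);
}
#endif

/**
 * register_chrdev() - register a major number for character devices
 * @major: major device number or 0 for dynamic allocation
 * @name: name of this range of devices
 * @fops: file operations associated with this range of devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.  If @major > 0 this function will attempt to reserve a device
 * with the given major number and will return zero on success.
 * Returns a -ve errno on failure.
 *
 * This function registers a range of 256 minor numbers, starting at minor 0,
 * all wired to the same @fops.  The @name has nothing to do with the name of
 * the device node in /dev; it only identifies the owner in /proc/devices.
 */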
int register_chrdev(unsigned int major, const char *name,
		    const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	char *s;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, 0, 256, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);
	for (s = strchr(kobject_name(&cdev->kobj), '/'); s; s = strchr(s, '/'))
		*s = '!';

	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
	if (err)
		goto out;

	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, 0, 256));
	return err;
}

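/*
 * Example (illustrative sketch, not compiled): the older register_chrdev()
 * interface claims all 256 minors of one major in a single call.  BAR_MAJOR,
 * bar_fops and its handlers are hypothetical; passing 0 instead of BAR_MAJOR
 * would request a dynamically allocated major, which is then returned.
 */
#if 0
static const struct file_operations bar_fops = {
	.owner	= THIS_MODULE,
	.open	= bar_open,	/* hypothetical handlers */
	.read	= bar_read,
};

static int __init bar_init(void)
{
	int ret = register_chrdev(BAR_MAJOR, "bar", &bar_fops);
	return ret < 0 ? ret : 0;
}

static void __exit bar_exit(void)
{
	unregister_chrdev(BAR_MAJOR, "bar");
}
#endif

/**
 * unregister_chrdev_region() - return a range of device numbers
 * @from: the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * This function will unregister a range of @count device numbers,
 * starting with @from.  The caller should normally be the one who
 * allocated those numbers in the first place.
 */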
void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}

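/**
 * unregister_chrdev() - undo a register_chrdev() registration
 * @major: major device number being released
 * @name: name of this range of devices (currently unused)
 *
 * Releases the 256-minor region that register_chrdev() reserved for
 * @major and deletes the associated cdev.
 */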
int unregister_chrdev(unsigned int major, const char *name)
{
	struct char_device_struct *cd;
	cd = __unregister_chrdev_region(major, 0, 256);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
	return 0;
}

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}

void cdev_put(struct cdev *p)
{
	if (p) {
		struct module *owner = p->owner;
		kobject_put(&p->kobj);
		module_put(owner);
	}
}

/*
 * Called every time a character special file is opened
 */
int chrdev_open(struct inode *inode, struct file *filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			inode->i_cindex = idx;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);
	if (ret)
		return ret;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op) {
		cdev_put(p);
		return -ENXIO;
	}
	if (filp->f_op->open) {
		lock_kernel();
		ret = filp->f_op->open(inode, filp);
		unlock_kernel();
	}
	if (ret)
		cdev_put(p);
	return ret;
}

void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}

/*
 * Dummy default file operations: the only thing they provide is the open
 * routine, which then fills in the real file operations for the particular
 * special file being opened.
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
};

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}

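/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */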
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	p->dev = dev;
	p->count = count;
	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}

static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}

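/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself once the last reference to its kobject is dropped.
 */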
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}


static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
	kfree(p);
}

static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};

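/**
 * cdev_alloc() - allocate a cdev structure
 *
 * Allocates and returns a cdev structure, or NULL on failure.
 */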
struct cdev *cdev_alloc(void)
{
	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		p->kobj.ktype = &ktype_cdev_dynamic;
		INIT_LIST_HEAD(&p->list);
		kobject_init(&p->kobj);
	}
	return p;
}

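/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().
 */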
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	cdev->kobj.ktype = &ktype_cdev_default;
	kobject_init(&cdev->kobj);
	cdev->ops = fops;
}
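
/*
 * Example (illustrative sketch, not compiled): the usual life cycle of a
 * statically embedded cdev.  baz_cdev, baz_setup and baz_teardown are
 * hypothetical; the dev_t passed in is assumed to have been reserved
 * beforehand with alloc_chrdev_region() or register_chrdev_region().
 */
#if 0
static struct cdev baz_cdev;

static int baz_setup(dev_t devt, const struct file_operations *fops)
{
	cdev_init(&baz_cdev, fops);
	baz_cdev.owner = THIS_MODULE;
	/* the device becomes live as soon as cdev_add() succeeds */
	return cdev_add(&baz_cdev, devt, 1);
}

static void baz_teardown(void)
{
	cdev_del(&baz_cdev);
}
#endif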

static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}
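
/*
 * Example (illustrative sketch, not compiled): for the request_module()
 * calls above to load a driver automatically, the module must advertise a
 * matching "char-major-..." alias, e.g. via MODULE_ALIAS_CHARDEV_MAJOR().
 * QUX_MAJOR is hypothetical.
 */
#if 0
MODULE_ALIAS_CHARDEV_MAJOR(QUX_MAJOR);	/* alias "char-major-<QUX_MAJOR>-*" */
#endif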

void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}


/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);