// SPDX-License-Identifier: GPL-2.0
/*
 * devtmpfs - kernel-maintained tmpfs-based /dev
 *
 * Copyright (C) 2009, Kay Sievers <kay.sievers@vrfy.org>
 *
 * During bootup, before any driver core device is registered,
 * devtmpfs, a tmpfs-based filesystem, is created. Every driver-core
 * device which requests a device node will add a node in this
 * filesystem.
 * By default, all device nodes are named after the device, owned by
 * root and have a default mode of 0600. Subsystems can overwrite the
 * default setting if needed.
 */

#define pr_fmt(fmt) "devtmpfs: " fmt

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/ramfs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/init_syscalls.h>
#include <uapi/linux/mount.h>
#include "base.h"

#ifdef CONFIG_DEVTMPFS_SAFE
#define DEVTMPFS_MFLAGS       (MS_SILENT | MS_NOEXEC | MS_NOSUID)
#else
#define DEVTMPFS_MFLAGS       (MS_SILENT)
#endif

static struct task_struct *thread;

static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);

static DEFINE_SPINLOCK(req_lock);

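/*
 * A single create or remove request for the kdevtmpfs thread. Requests
 * are chained into a simple list protected by req_lock and handled
 * asynchronously; the submitter waits on 'done' until its request has
 * been processed.
 */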
static struct req {
	struct req *next;
	struct completion done;
	int err;
	const char *name;
	umode_t mode;	/* 0 => delete */
	kuid_t uid;
	kgid_t gid;
	struct device *dev;
} *requests;

static int __init mount_param(char *str)
{
	mount_dev = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("devtmpfs.mount=", mount_param);

static struct vfsmount *mnt;

static struct file_system_type internal_fs_type = {
	.name = "devtmpfs",
#ifdef CONFIG_TMPFS
	.init_fs_context = shmem_init_fs_context,
#else
	.init_fs_context = ramfs_init_fs_context,
#endif
	.kill_sb = kill_litter_super,
};

/* Simply take a ref on the existing mount */
static int devtmpfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb = mnt->mnt_sb;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	fc->root = dget(sb->s_root);
	return 0;
}

/* Ops are filled in during init depending on underlying shmem or ramfs type */
struct fs_context_operations devtmpfs_context_ops = {};

/* Call the underlying initialization and set to our ops */
static int devtmpfs_init_fs_context(struct fs_context *fc)
{
	int ret;
#ifdef CONFIG_TMPFS
	ret = shmem_init_fs_context(fc);
#else
	ret = ramfs_init_fs_context(fc);
#endif
	if (ret < 0)
		return ret;

	fc->ops = &devtmpfs_context_ops;

	return 0;
}

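/*
 * The registered filesystem type used for mounting devtmpfs by name,
 * both from userspace and for the initial auto-mount; every such mount
 * shares the internal superblock created in devtmpfs_init() via
 * devtmpfs_get_tree().
 */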
static struct file_system_type dev_fs_type = {
	.name = "devtmpfs",
	.init_fs_context = devtmpfs_init_fs_context,
};

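/*
 * Queue a request for the kdevtmpfs thread, wake it and wait for the
 * request to be handled. The temporary name buffer allocated by
 * device_get_devnode() is freed here once the node name is no longer
 * needed.
 */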
static int devtmpfs_submit_req(struct req *req, const char *tmp)
{
	init_completion(&req->done);

	spin_lock(&req_lock);
	req->next = requests;
	requests = req;
	spin_unlock(&req_lock);

	wake_up_process(thread);
	wait_for_completion(&req->done);

	kfree(tmp);

	return req->err;
}

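/*
 * Request creation of a device node for dev->devt. The node name,
 * ownership and mode default to the device name, root:root and 0600;
 * subsystems may override them via device_get_devnode(). A no-op when
 * the kdevtmpfs thread is not running.
 */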
int devtmpfs_create_node(struct device *dev)
{
	const char *tmp = NULL;
	struct req req;

	if (!thread)
		return 0;

	req.mode = 0;
	req.uid = GLOBAL_ROOT_UID;
	req.gid = GLOBAL_ROOT_GID;
	req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp);
	if (!req.name)
		return -ENOMEM;

	if (req.mode == 0)
		req.mode = 0600;
	if (is_blockdev(dev))
		req.mode |= S_IFBLK;
	else
		req.mode |= S_IFCHR;

	req.dev = dev;

	return devtmpfs_submit_req(&req, tmp);
}

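/*
 * Request removal of the device node; mode == 0 marks the queued
 * request as a delete.
 */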
int devtmpfs_delete_node(struct device *dev)
{
	const char *tmp = NULL;
	struct req req;

	if (!thread)
		return 0;

	req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp);
	if (!req.name)
		return -ENOMEM;

	req.mode = 0;
	req.dev = dev;

	return devtmpfs_submit_req(&req, tmp);
}

static int dev_mkdir(const char *name, umode_t mode)
{
	struct dentry *dentry;
	struct path path;
	int err;

	dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
	if (!err)
		/* mark as kernel-created inode */
		d_inode(dentry)->i_private = &thread;
	done_path_create(&path, dentry);
	return err;
}

static int create_path(const char *nodepath)
{
	char *path;
	char *s;
	int err = 0;

	/* parent directories do not exist, create them */
	path = kstrdup(nodepath, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	s = path;
	for (;;) {
		s = strchr(s, '/');
		if (!s)
			break;
		s[0] = '\0';
		err = dev_mkdir(path, 0755);
		if (err && err != -EEXIST)
			break;
		s[0] = '/';
		s++;
	}
	kfree(path);
	return err;
}

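/*
 * Create the device node, creating any missing parent directories on
 * demand, then apply the requested ownership and mode and mark the
 * inode as kernel-created.
 */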
static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
			 kgid_t gid, struct device *dev)
{
	struct dentry *dentry;
	struct path path;
	int err;

	dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
	if (dentry == ERR_PTR(-ENOENT)) {
		create_path(nodename);
		dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
	}
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	err = vfs_mknod(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode,
			dev->devt);
	if (!err) {
		struct iattr newattrs;

		newattrs.ia_mode = mode;
		newattrs.ia_uid = uid;
		newattrs.ia_gid = gid;
		newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
		inode_lock(d_inode(dentry));
		notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
		inode_unlock(d_inode(dentry));

		/* mark as kernel-created inode */
		d_inode(dentry)->i_private = &thread;
	}
	done_path_create(&path, dentry);
	return err;
}

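/*
 * Remove a directory, but only if devtmpfs itself created it
 * (i_private points at &thread).
 */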
static int dev_rmdir(const char *name)
{
	struct path parent;
	struct dentry *dentry;
	int err;

	dentry = kern_path_locked(name, &parent);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	if (d_really_is_positive(dentry)) {
		if (d_inode(dentry)->i_private == &thread)
			err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
					dentry);
		else
			err = -EPERM;
	} else {
		err = -ENOENT;
	}
	dput(dentry);
	inode_unlock(d_inode(parent.dentry));
	path_put(&parent);
	return err;
}

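/*
 * Walk the path from the deepest component upwards and remove the
 * parent directories that were created for the node; stop at the first
 * directory that was not created by devtmpfs or cannot be removed.
 */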
static int delete_path(const char *nodepath)
{
	char *path;
	int err = 0;

	path = kstrdup(nodepath, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	for (;;) {
		char *base;

		base = strrchr(path, '/');
		if (!base)
			break;
		base[0] = '\0';
		err = dev_rmdir(path);
		if (err)
			break;
	}

	kfree(path);
	return err;
}

static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
{
	/* did we create it */
	if (inode->i_private != &thread)
		return 0;

	/* does the dev_t match */
	if (is_blockdev(dev)) {
		if (!S_ISBLK(stat->mode))
			return 0;
	} else {
		if (!S_ISCHR(stat->mode))
			return 0;
	}
	if (stat->rdev != dev->devt)
		return 0;

	/* ours */
	return 1;
}

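/*
 * Remove the node if it is still the one devtmpfs created for this
 * device (same type and dev_t), then clean up any parent directories
 * created for it.
 */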
static int handle_remove(const char *nodename, struct device *dev)
{
	struct path parent;
	struct dentry *dentry;
	int deleted = 0;
	int err;

	dentry = kern_path_locked(nodename, &parent);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (d_really_is_positive(dentry)) {
		struct kstat stat;
		struct path p = {.mnt = parent.mnt, .dentry = dentry};
		err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
				  AT_STATX_SYNC_AS_STAT);
		if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
			struct iattr newattrs;
			/*
			 * before unlinking this node, reset permissions
			 * of possible references like hardlinks
			 */
			newattrs.ia_uid = GLOBAL_ROOT_UID;
			newattrs.ia_gid = GLOBAL_ROOT_GID;
			newattrs.ia_mode = stat.mode & ~0777;
			newattrs.ia_valid =
				ATTR_UID|ATTR_GID|ATTR_MODE;
			inode_lock(d_inode(dentry));
			notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
			inode_unlock(d_inode(dentry));
			err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
					 dentry, NULL);
			if (!err || err == -ENOENT)
				deleted = 1;
		}
	} else {
		err = -ENOENT;
	}
	dput(dentry);
	inode_unlock(d_inode(parent.dentry));

	path_put(&parent);
	if (deleted && strchr(nodename, '/'))
		delete_path(nodename);
	return err;
}

/*
 * If configured, or requested by the commandline, devtmpfs will be
 * auto-mounted after the kernel mounted the root filesystem.
 */
int __init devtmpfs_mount(void)
{
	int err;

	if (!mount_dev)
		return 0;

	if (!thread)
		return 0;

	err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
	if (err)
		pr_info("error mounting %d\n", err);
	else
		pr_info("mounted\n");
	return err;
}

static __initdata DECLARE_COMPLETION(setup_done);

static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
		  struct device *dev)
{
	if (mode)
		return handle_create(name, mode, uid, gid, dev);
	else
		return handle_remove(name, dev);
}

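/*
 * Main loop of the kdevtmpfs thread: drain the request list, handle
 * each create/remove request and signal its completion, then sleep
 * until devtmpfs_submit_req() queues more work and wakes us up.
 */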
static void __noreturn devtmpfs_work_loop(void)
{
	while (1) {
		spin_lock(&req_lock);
		while (requests) {
			struct req *req = requests;
			requests = NULL;
			spin_unlock(&req_lock);
			while (req) {
				struct req *next = req->next;
				req->err = handle(req->name, req->mode,
						  req->uid, req->gid, req->dev);
				complete(&req->done);
				req = next;
			}
			spin_lock(&req_lock);
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock(&req_lock);
		schedule();
	}
}

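/*
 * Runs in the kdevtmpfs thread: unshare the mount namespace, mount the
 * devtmpfs instance over this namespace's root and chroot into it, so
 * the relative path lookups done by the request handlers resolve
 * inside devtmpfs.
 */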
static noinline int __init devtmpfs_setup(void *p)
{
	int err;

	err = ksys_unshare(CLONE_NEWNS);
	if (err)
		goto out;
	err = init_mount("devtmpfs", "/", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
	if (err)
		goto out;
	init_chdir("/.."); /* will traverse into overmounted root */
	init_chroot(".");
out:
	*(int *)p = err;
	return err;
}

/*
 * The __ref is because devtmpfs_setup needs to be __init for the routines it
 * calls.  That call is done while devtmpfs_init, which is marked __init,
 * synchronously waits for it to complete.
 */
static int __ref devtmpfsd(void *p)
{
	int err = devtmpfs_setup(p);

	complete(&setup_done);
	if (err)
		return err;
	devtmpfs_work_loop();
	return 0;
}

/*
 * Get the underlying (shmem/ramfs) context ops to build ours
 */
static int devtmpfs_configure_context(void)
{
	struct fs_context *fc;

	fc = fs_context_for_reconfigure(mnt->mnt_root, mnt->mnt_sb->s_flags,
					MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	/* Set up devtmpfs_context_ops based on underlying type */
	devtmpfs_context_ops.free	      = fc->ops->free;
	devtmpfs_context_ops.dup	      = fc->ops->dup;
	devtmpfs_context_ops.parse_param      = fc->ops->parse_param;
	devtmpfs_context_ops.parse_monolithic = fc->ops->parse_monolithic;
	devtmpfs_context_ops.get_tree	      = &devtmpfs_get_tree;
	devtmpfs_context_ops.reconfigure      = fc->ops->reconfigure;

	put_fs_context(fc);

	return 0;
}

/*
 * Create the devtmpfs instance; driver-core devices will add their
 * device nodes here.
 */
int __init devtmpfs_init(void)
{
	char opts[] = "mode=0755";
	int err;

	mnt = vfs_kern_mount(&internal_fs_type, 0, "devtmpfs", opts);
	if (IS_ERR(mnt)) {
		pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
		return PTR_ERR(mnt);
	}

	err = devtmpfs_configure_context();
	if (err) {
		pr_err("unable to configure devtmpfs type %d\n", err);
		return err;
	}

	err = register_filesystem(&dev_fs_type);
	if (err) {
		pr_err("unable to register devtmpfs type %d\n", err);
		return err;
	}

	thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
	if (!IS_ERR(thread)) {
		wait_for_completion(&setup_done);
	} else {
		err = PTR_ERR(thread);
		thread = NULL;
	}

	if (err) {
		pr_err("unable to create devtmpfs %d\n", err);
		unregister_filesystem(&dev_fs_type);
		thread = NULL;
		return err;
	}

	pr_info("initialized\n");
	return 0;
}