// SPDX-License-Identifier: GPL-2.0
/*
 * devtmpfs - kernel-maintained tmpfs-based /dev
 *
 * Copyright (C) 2009, Kay Sievers <kay.sievers@vrfy.org>
 *
 * During bootup, before any driver core device is registered,
 * devtmpfs, a tmpfs-based filesystem, is created. Every driver-core
 * device which requests a device node will add a node in this
 * filesystem.
 * By default, all device nodes are named after the device, are owned
 * by root and have a default mode of 0600. Subsystems can overwrite
 * the default settings if needed.
 */

#define pr_fmt(fmt) "devtmpfs: " fmt

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/ramfs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/init_syscalls.h>
#include <uapi/linux/mount.h>
#include "base.h"

#ifdef CONFIG_DEVTMPFS_SAFE
#define DEVTMPFS_MFLAGS	(MS_SILENT | MS_NOEXEC | MS_NOSUID)
#else
#define DEVTMPFS_MFLAGS	(MS_SILENT)
#endif

static struct task_struct *thread;

static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);

static DEFINE_SPINLOCK(req_lock);

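/*
 * Node creation and removal requests are queued on this list by
 * devtmpfs_create_node()/devtmpfs_delete_node() and are serviced by the
 * kdevtmpfs thread.
 */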
static struct req {
	struct req *next;
	struct completion done;
	int err;
	const char *name;
	umode_t mode;	/* 0 => delete */
	kuid_t uid;
	kgid_t gid;
	struct device *dev;
} *requests;

static int __init mount_param(char *str)
{
	mount_dev = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("devtmpfs.mount=", mount_param);

static struct vfsmount *mnt;

static struct file_system_type internal_fs_type = {
	.name = "devtmpfs",
#ifdef CONFIG_TMPFS
	.init_fs_context = shmem_init_fs_context,
#else
	.init_fs_context = ramfs_init_fs_context,
#endif
	.kill_sb = kill_litter_super,
};

/* Simply take a ref on the existing mount */
static int devtmpfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb = mnt->mnt_sb;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	fc->root = dget(sb->s_root);
	return 0;
}

/* Ops are filled in during init depending on underlying shmem or ramfs type */
struct fs_context_operations devtmpfs_context_ops = {};

/* Call the underlying initialization and set to our ops */
static int devtmpfs_init_fs_context(struct fs_context *fc)
{
	int ret;
#ifdef CONFIG_TMPFS
	ret = shmem_init_fs_context(fc);
#else
	ret = ramfs_init_fs_context(fc);
#endif
	if (ret < 0)
		return ret;

	fc->ops = &devtmpfs_context_ops;

	return 0;
}

static struct file_system_type dev_fs_type = {
	.name = "devtmpfs",
	.init_fs_context = devtmpfs_init_fs_context,
};

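/*
 * Queue a request for the kdevtmpfs thread and wait for it to be handled.
 * The request lives on the caller's stack; req->err carries back the
 * result, and @tmp (the scratch buffer from device_get_devnode()) is
 * freed here.
 */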
static int devtmpfs_submit_req(struct req *req, const char *tmp)
{
	init_completion(&req->done);

	spin_lock(&req_lock);
	req->next = requests;
	requests = req;
	spin_unlock(&req_lock);

	wake_up_process(thread);
	wait_for_completion(&req->done);

	kfree(tmp);

	return req->err;
}

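/*
 * Request a device node for @dev.  Name, ownership and mode come from
 * device_get_devnode(); a missing mode defaults to 0600, and the node type
 * (block or character) is derived from the device.
 */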
int devtmpfs_create_node(struct device *dev)
{
	const char *tmp = NULL;
	struct req req;

	if (!thread)
		return 0;

	req.mode = 0;
	req.uid = GLOBAL_ROOT_UID;
	req.gid = GLOBAL_ROOT_GID;
	req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp);
	if (!req.name)
		return -ENOMEM;

	if (req.mode == 0)
		req.mode = 0600;
	if (is_blockdev(dev))
		req.mode |= S_IFBLK;
	else
		req.mode |= S_IFCHR;

	req.dev = dev;

	return devtmpfs_submit_req(&req, tmp);
}

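/* Request removal of the device node for @dev (mode == 0 means delete). */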
int devtmpfs_delete_node(struct device *dev)
{
	const char *tmp = NULL;
	struct req req;

	if (!thread)
		return 0;

	req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp);
	if (!req.name)
		return -ENOMEM;

	req.mode = 0;
	req.dev = dev;

	return devtmpfs_submit_req(&req, tmp);
}

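/* Create one directory and mark its inode as created by devtmpfs. */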
static int dev_mkdir(const char *name, umode_t mode)
{
	struct dentry *dentry;
	struct path path;

	dentry = start_creating_path(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
	if (!IS_ERR(dentry))
		/* mark as kernel-created inode */
		d_inode(dentry)->i_private = &thread;
	end_creating_path(&path, dentry);
	return PTR_ERR_OR_ZERO(dentry);
}

static int create_path(const char *nodepath)
{
	char *path;
	char *s;
	int err = 0;

	/* parent directories do not exist, create them */
	path = kstrdup(nodepath, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	s = path;
	for (;;) {
		s = strchr(s, '/');
		if (!s)
			break;
		s[0] = '\0';
		err = dev_mkdir(path, 0755);
		if (err && err != -EEXIST)
			break;
		s[0] = '/';
		s++;
	}
	kfree(path);
	return err;
}

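/*
 * Create the device node: mknod it (creating missing parent directories
 * on -ENOENT), apply the requested mode/uid/gid, and mark the inode as
 * kernel-created so devtmpfs will agree to remove it later.
 */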
static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
			 kgid_t gid, struct device *dev)
{
	struct dentry *dentry;
	struct path path;
	int err;

	dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
	if (dentry == ERR_PTR(-ENOENT)) {
		create_path(nodename);
		dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
	}
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	err = vfs_mknod(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode,
			dev->devt);
	if (!err) {
		struct iattr newattrs;

		newattrs.ia_mode = mode;
		newattrs.ia_uid = uid;
		newattrs.ia_gid = gid;
		newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
		inode_lock(d_inode(dentry));
		notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
		inode_unlock(d_inode(dentry));

		/* mark as kernel-created inode */
		d_inode(dentry)->i_private = &thread;
	}
	end_creating_path(&path, dentry);
	return err;
}

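/* Remove a directory, but only if devtmpfs created it (see dev_mkdir()). */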
static int dev_rmdir(const char *name)
{
	struct path parent;
	struct dentry *dentry;
	int err;

	dentry = start_removing_path(name, &parent);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	if (d_inode(dentry)->i_private == &thread)
		err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
				dentry);
	else
		err = -EPERM;

	end_removing_path(&parent, dentry);
	return err;
}

static int delete_path(const char *nodepath)
{
	char *path;
	int err = 0;

	path = kstrdup(nodepath, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	for (;;) {
		char *base;

		base = strrchr(path, '/');
		if (!base)
			break;
		base[0] = '\0';
		err = dev_rmdir(path);
		if (err)
			break;
	}

	kfree(path);
	return err;
}

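/* Check whether the inode is the node devtmpfs created for this device. */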
static int dev_mynode(struct device *dev, struct inode *inode)
{
	/* did we create it */
	if (inode->i_private != &thread)
		return 0;

	/* does the dev_t match */
	if (is_blockdev(dev)) {
		if (!S_ISBLK(inode->i_mode))
			return 0;
	} else {
		if (!S_ISCHR(inode->i_mode))
			return 0;
	}
	if (inode->i_rdev != dev->devt)
		return 0;

	/* ours */
	return 1;
}

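/*
 * Remove the device node if it is still ours: reset the permissions first
 * (possible hard links keep the inode alive), unlink it, then remove any
 * parent directories devtmpfs created for it.
 */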
static int handle_remove(const char *nodename, struct device *dev)
{
	struct path parent;
	struct dentry *dentry;
	struct inode *inode;
	int deleted = 0;
	int err = 0;

	dentry = start_removing_path(nodename, &parent);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	inode = d_inode(dentry);
	if (dev_mynode(dev, inode)) {
		struct iattr newattrs;
		/*
		 * before unlinking this node, reset permissions
		 * of possible references like hardlinks
		 */
		newattrs.ia_uid = GLOBAL_ROOT_UID;
		newattrs.ia_gid = GLOBAL_ROOT_GID;
		newattrs.ia_mode = inode->i_mode & ~0777;
		newattrs.ia_valid =
			ATTR_UID|ATTR_GID|ATTR_MODE;
		inode_lock(d_inode(dentry));
		notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
		inode_unlock(d_inode(dentry));
		err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
				 dentry, NULL);
		if (!err || err == -ENOENT)
			deleted = 1;
	}
	end_removing_path(&parent, dentry);

	if (deleted && strchr(nodename, '/'))
		delete_path(nodename);
	return err;
}

/*
 * If configured, or requested on the command line, devtmpfs will be
 * auto-mounted after the kernel has mounted the root filesystem.
 */
int __init devtmpfs_mount(void)
{
	int err;

	if (!mount_dev)
		return 0;

	if (!thread)
		return 0;

	err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
	if (err)
		pr_info("error mounting %d\n", err);
	else
		pr_info("mounted\n");
	return err;
}

static __initdata DECLARE_COMPLETION(setup_done);

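/* Dispatch a request: a non-zero mode means create, zero means remove. */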
static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
		  struct device *dev)
{
	if (mode)
		return handle_create(name, mode, uid, gid, dev);
	else
		return handle_remove(name, dev);
}

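/*
 * Main loop of the kdevtmpfs thread: atomically take the whole pending
 * list, handle and complete each request, then sleep until a new
 * submission wakes the thread.
 */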
static void __noreturn devtmpfs_work_loop(void)
{
	while (1) {
		spin_lock(&req_lock);
		while (requests) {
			struct req *req = requests;
			requests = NULL;
			spin_unlock(&req_lock);
			while (req) {
				struct req *next = req->next;
				req->err = handle(req->name, req->mode,
						  req->uid, req->gid, req->dev);
				complete(&req->done);
				req = next;
			}
			spin_lock(&req_lock);
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock(&req_lock);
		schedule();
	}
}

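/*
 * One-time setup for the kdevtmpfs thread: move it into a private mount
 * namespace and chroot it into a devtmpfs instance mounted over its root,
 * so the relative node paths used above resolve inside devtmpfs.
 */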
static noinline int __init devtmpfs_setup(void *p)
{
	int err;

	err = ksys_unshare(CLONE_NEWNS);
	if (err)
		goto out;
	err = init_mount("devtmpfs", "/", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
	if (err)
		goto out;
	init_chdir("/.."); /* will traverse into overmounted root */
	init_chroot(".");
out:
	*(int *)p = err;
	return err;
}

/*
 * The __ref is because devtmpfs_setup needs to be __init for the routines it
 * calls. That call is done while devtmpfs_init, which is marked __init,
 * synchronously waits for it to complete.
 */
static int __ref devtmpfsd(void *p)
{
	int err = devtmpfs_setup(p);

	complete(&setup_done);
	if (err)
		return err;
	devtmpfs_work_loop();
	return 0;
}

/*
 * Get the underlying (shmem/ramfs) context ops to build ours
 */
static int devtmpfs_configure_context(void)
{
	struct fs_context *fc;

	fc = fs_context_for_reconfigure(mnt->mnt_root, mnt->mnt_sb->s_flags,
					MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	/* Set up devtmpfs_context_ops based on underlying type */
	devtmpfs_context_ops.free = fc->ops->free;
	devtmpfs_context_ops.dup = fc->ops->dup;
	devtmpfs_context_ops.parse_param = fc->ops->parse_param;
	devtmpfs_context_ops.parse_monolithic = fc->ops->parse_monolithic;
	devtmpfs_context_ops.get_tree = &devtmpfs_get_tree;
	devtmpfs_context_ops.reconfigure = fc->ops->reconfigure;

	put_fs_context(fc);

	return 0;
}

/*
 * Create the devtmpfs instance; driver-core devices will add their device
 * nodes here.
 */
int __init devtmpfs_init(void)
{
	char opts[] = "mode=0755";
	int err;

	mnt = vfs_kern_mount(&internal_fs_type, 0, "devtmpfs", opts);
	if (IS_ERR(mnt)) {
		pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
		return PTR_ERR(mnt);
	}

	err = devtmpfs_configure_context();
	if (err) {
		pr_err("unable to configure devtmpfs type %d\n", err);
		return err;
	}

	err = register_filesystem(&dev_fs_type);
	if (err) {
		pr_err("unable to register devtmpfs type %d\n", err);
		return err;
	}

	thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
	if (!IS_ERR(thread)) {
		wait_for_completion(&setup_done);
	} else {
		err = PTR_ERR(thread);
		thread = NULL;
	}

	if (err) {
		pr_err("unable to create devtmpfs %d\n", err);
		unregister_filesystem(&dev_fs_type);
		thread = NULL;
		return err;
	}

	pr_info("initialized\n");
	return 0;
}