xref: /linux/fs/fs_struct.c (revision 827634added7f38b7d724cab1dccdb2b004c13c3)
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>
#include "internal.h"

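/*
 * An fs_struct holds a task's root, working directory and umask.
 * Writers take fs->lock and bump fs->seq around updates of ->root and
 * ->pwd so that lockless readers can detect a racing change and retry;
 * lifetime is managed through the fs->users reference count.
 */
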
/*
 * Replace fs->root with *path and put the old root.  It can block.
 */
void set_fs_root(struct fs_struct *fs, const struct path *path)
{
	struct path old_root;

	path_get(path);
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_root = fs->root;
	fs->root = *path;
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);
}

/*
 * Replace fs->pwd with *path and put the old pwd.  It can block.
 */
void set_fs_pwd(struct fs_struct *fs, const struct path *path)
{
	struct path old_pwd;

	path_get(path);
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}

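/*
 * If *p refers to the same mount/dentry pair as *old, switch it to *new.
 * Returns 1 if the path was replaced, 0 otherwise; the caller is
 * responsible for adjusting the reference counts accordingly.
 */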
static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
{
	if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
		return 0;
	*p = *new;
	return 1;
}

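/*
 * Walk every task in the system and, for each fs_struct whose root or
 * pwd still points at old_root, retarget it to new_root (used by
 * pivot_root(2)).  References on new_root are taken under fs->lock;
 * the matching puts on old_root are deferred until tasklist_lock has
 * been dropped, since path_put() may block.
 */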
void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			int hits = 0;
			spin_lock(&fs->lock);
			write_seqcount_begin(&fs->seq);
			hits += replace_path(&fs->root, old_root, new_root);
			hits += replace_path(&fs->pwd, old_root, new_root);
			write_seqcount_end(&fs->seq);
			while (hits--) {
				count++;
				path_get(new_root);
			}
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	while (count--)
		path_put(old_root);
}

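/*
 * Drop the root and pwd references and release the fs_struct itself.
 * Callers must ensure no users remain (fs->users has reached zero).
 */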
void free_fs_struct(struct fs_struct *fs)
{
	path_put(&fs->root);
	path_put(&fs->pwd);
	kmem_cache_free(fs_cachep, fs);
}

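/*
 * Detach tsk from its fs_struct and drop our reference on it; the last
 * user frees the structure.
 */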
void exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		int kill;
		task_lock(tsk);
		spin_lock(&fs->lock);
		tsk->fs = NULL;
		kill = !--fs->users;
		spin_unlock(&fs->lock);
		task_unlock(tsk);
		if (kill)
			free_fs_struct(fs);
	}
}

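/*
 * Allocate a new fs_struct that starts out as a copy of *old: the same
 * umask, plus extra references on old's root and pwd.  Returns NULL if
 * the allocation fails.
 */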
struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* No need to lock the new fs: nobody else can see it yet. */
	if (fs) {
		fs->users = 1;
		fs->in_exec = 0;
		spin_lock_init(&fs->lock);
		seqcount_init(&fs->seq);
		fs->umask = old->umask;

		spin_lock(&old->lock);
		fs->root = old->root;
		path_get(&fs->root);
		fs->pwd = old->pwd;
		path_get(&fs->pwd);
		spin_unlock(&old->lock);
	}
	return fs;
}

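/*
 * Give the current task a private copy of its fs_struct and drop the
 * reference on the old one, freeing it if we were the last user.
 * Returns 0 on success or -ENOMEM if the copy cannot be allocated.
 */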
int unshare_fs_struct(void)
{
	struct fs_struct *fs = current->fs;
	struct fs_struct *new_fs = copy_fs_struct(fs);
	int kill;

	if (!new_fs)
		return -ENOMEM;

	task_lock(current);
	spin_lock(&fs->lock);
	kill = !--fs->users;
	current->fs = new_fs;
	spin_unlock(&fs->lock);
	task_unlock(current);

	if (kill)
		free_fs_struct(fs);

	return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);

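/* Return the file-creation mask (umask) of the current task. */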
int current_umask(void)
{
	return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);

/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.users		= 1,
	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
	.seq		= SEQCNT_ZERO(init_fs.seq),
	.umask		= 0022,
};