xref: /linux/fs/fs_struct.c (revision 704bf317fd21683e5c71a542f5fb5f65271a1582)
1 #include <linux/module.h>
2 #include <linux/sched.h>
3 #include <linux/fs.h>
4 #include <linux/path.h>
5 #include <linux/slab.h>
6 #include <linux/fs_struct.h>
7 
/*
 * Replace fs->root with *path, taking a long-term reference on the new
 * root and putting the old one.  The put of the old path can block, so
 * it is done after the lock is dropped.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
	struct path old_root;

	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);	/* lockless readers of fs->root retry */
	old_root = fs->root;
	fs->root = *path;
	path_get_long(path);		/* pin the new root; still inside the write section */
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);
	if (old_root.dentry)
		path_put_long(&old_root);	/* drop old root outside the lock */
}
26 
/*
 * Replace fs->pwd with *path, taking a long-term reference on the new
 * working directory and putting the old one.  The put of the old path
 * can block, so it is done after the lock is dropped.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);	/* lockless readers of fs->pwd retry */
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get_long(path);		/* pin the new pwd; still inside the write section */
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put_long(&old_pwd);	/* drop old pwd outside the lock */
}
46 
/*
 * For every task whose fs->root and/or fs->pwd equals *old_root, switch
 * that path over to *new_root.  A long-term reference to new_root is
 * taken for each replacement made; the corresponding references to
 * old_root are dropped only after the task-list walk, outside all locks.
 */
void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;		/* how many old_root references to drop below */

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);	/* stabilizes p->fs */
		fs = p->fs;
		if (fs) {
			spin_lock(&fs->lock);
			write_seqcount_begin(&fs->seq);
			if (fs->root.dentry == old_root->dentry
			    && fs->root.mnt == old_root->mnt) {
				path_get_long(new_root);
				fs->root = *new_root;
				count++;
			}
			if (fs->pwd.dentry == old_root->dentry
			    && fs->pwd.mnt == old_root->mnt) {
				path_get_long(new_root);
				fs->pwd = *new_root;
				count++;
			}
			write_seqcount_end(&fs->seq);
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	/* Drop the displaced references now that no locks are held. */
	while (count--)
		path_put_long(old_root);
}
81 
82 void free_fs_struct(struct fs_struct *fs)
83 {
84 	path_put_long(&fs->root);
85 	path_put_long(&fs->pwd);
86 	kmem_cache_free(fs_cachep, fs);
87 }
88 
/*
 * Detach @tsk from its fs_struct and drop one user reference; the
 * structure is freed when the last user goes away.
 */
void exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		int kill;
		task_lock(tsk);
		spin_lock(&fs->lock);
		write_seqcount_begin(&fs->seq);	/* lockless readers retry across the detach */
		tsk->fs = NULL;
		kill = !--fs->users;	/* nonzero iff we were the last user */
		write_seqcount_end(&fs->seq);
		spin_unlock(&fs->lock);
		task_unlock(tsk);
		if (kill)
			free_fs_struct(fs);	/* can block; done outside all locks */
	}
}
107 
/*
 * Allocate and return a copy of @old: same umask, root and pwd, with a
 * fresh long-term reference taken on both paths.  The copy starts with
 * a single user.  Returns NULL on allocation failure.
 */
struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		fs->users = 1;
		fs->in_exec = 0;
		spin_lock_init(&fs->lock);
		seqcount_init(&fs->seq);
		fs->umask = old->umask;

		/* old->lock keeps old's root/pwd stable while we copy and pin them */
		spin_lock(&old->lock);
		fs->root = old->root;
		path_get_long(&fs->root);
		fs->pwd = old->pwd;
		path_get_long(&fs->pwd);
		spin_unlock(&old->lock);
	}
	return fs;
}
128 
/*
 * Give the current task a private copy of its fs_struct, dropping its
 * reference to the (possibly shared) old one.  Returns 0 on success,
 * -ENOMEM if the copy could not be allocated.
 */
int unshare_fs_struct(void)
{
	struct fs_struct *fs = current->fs;
	struct fs_struct *new_fs = copy_fs_struct(fs);
	int kill;

	if (!new_fs)
		return -ENOMEM;

	task_lock(current);
	spin_lock(&fs->lock);
	kill = !--fs->users;	/* nonzero iff we were the last user of the old one */
	current->fs = new_fs;
	spin_unlock(&fs->lock);
	task_unlock(current);

	if (kill)
		free_fs_struct(fs);	/* can block; done outside all locks */

	return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);
151 
152 int current_umask(void)
153 {
154 	return current->fs->umask;
155 }
156 EXPORT_SYMBOL(current_umask);
157 
/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.users		= 1,	/* the init task is the sole initial user */
	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
	.seq		= SEQCNT_ZERO,
	.umask		= 0022,	/* traditional default umask */
	/* NOTE(review): root/pwd are left zeroed here; presumably set up
	 * during boot before first use — confirm against init code. */
};
165 
/*
 * Detach current from its own fs_struct and attach it to the global
 * init_fs instead (presumably used when a task is daemonized — confirm
 * with callers).  The old fs_struct is freed if this was its last user.
 */
void daemonize_fs_struct(void)
{
	struct fs_struct *fs = current->fs;

	if (fs) {
		int kill;

		task_lock(current);

		/* become one more user of init_fs before publishing it */
		spin_lock(&init_fs.lock);
		init_fs.users++;
		spin_unlock(&init_fs.lock);

		spin_lock(&fs->lock);
		current->fs = &init_fs;
		kill = !--fs->users;	/* nonzero iff we were the last user */
		spin_unlock(&fs->lock);

		task_unlock(current);
		if (kill)
			free_fs_struct(fs);	/* can block; done outside all locks */
	}
}
189