/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 */


#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/zpl.h>

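/*
 * ->alloc_inode() hook.  Allocate a new inode, backed by a znode, for
 * this superblock and initialize its i_version to 1.
 */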
static struct inode *
zpl_inode_alloc(struct super_block *sb)
{
	struct inode *ip;

	VERIFY3S(zfs_inode_alloc(sb, &ip), ==, 0);
	inode_set_iversion(ip, 1);

	return (ip);
}

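/*
 * ->destroy_inode() hook.  Called once the VFS holds no further
 * references to the inode (i_count == 0); release the backing znode.
 */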
static void
zpl_inode_destroy(struct inode *ip)
{
	ASSERT(atomic_read(&ip->i_count) == 0);
	zfs_inode_destroy(ip);
}

/*
 * Called from __mark_inode_dirty() to reflect that something in the
 * inode has changed.  We use it to ensure the znode system attributes
 * are always strictly up to date with respect to the inode.
 */
#ifdef HAVE_DIRTY_INODE_WITH_FLAGS
static void
zpl_dirty_inode(struct inode *ip, int flags)
{
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	zfs_dirty_inode(ip, flags);
	spl_fstrans_unmark(cookie);
}
#else
static void
zpl_dirty_inode(struct inode *ip)
{
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	zfs_dirty_inode(ip, 0);
	spl_fstrans_unmark(cookie);
}
#endif /* HAVE_DIRTY_INODE_WITH_FLAGS */

/*
 * When ->drop_inode() is called its return value indicates whether the
 * inode should be evicted from the inode cache.  If the inode is
 * unhashed and has no links the default policy is to evict it
 * immediately.
 *
 * The ->evict_inode() callback must minimally truncate the inode pages,
 * and call clear_inode().  For 2.6.35 and later kernels this will
 * simply update the inode state, with the sync occurring before the
 * truncate in evict().  For earlier kernels clear_inode() maps to
 * end_writeback() which is responsible for completing all outstanding
 * write back.  In either case, once this is done it is safe to clean up
 * any remaining inode specific data via zfs_inactive().
 */
static void
zpl_evict_inode(struct inode *ip)
{
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	truncate_setsize(ip, 0);
	clear_inode(ip);
	zfs_inactive(ip);
	spl_fstrans_unmark(cookie);
}

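/*
 * ->put_super() hook.  Called while the VFS is tearing down the
 * superblock; unmount the dataset.  zfs_umount() is not expected to
 * fail at this point, hence the ASSERT.
 */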
static void
zpl_put_super(struct super_block *sb)
{
	fstrans_cookie_t cookie;
	int error;

	cookie = spl_fstrans_mark();
	error = -zfs_umount(sb);
	spl_fstrans_unmark(cookie);
	ASSERT3S(error, <=, 0);
}

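/*
 * ->sync_fs() hook.  Write out dirty data for this filesystem, waiting
 * for completion when 'wait' is set.  The caller's credentials are held
 * across the call into the ZFS sync path.
 */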
static int
zpl_sync_fs(struct super_block *sb, int wait)
{
	fstrans_cookie_t cookie;
	cred_t *cr = CRED();
	int error;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_sync(sb, wait, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

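/*
 * ->statfs() hook.  Report filesystem statistics, scaling the results
 * for 32-bit callers as described below.
 */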
static int
zpl_statfs(struct dentry *dentry, struct kstatfs *statp)
{
	fstrans_cookie_t cookie;
	int error;

	cookie = spl_fstrans_mark();
	error = -zfs_statvfs(dentry->d_inode, statp);
	spl_fstrans_unmark(cookie);
	ASSERT3S(error, <=, 0);

	/*
	 * If required by a 32-bit system call, dynamically scale the
	 * block size up to 16MiB and decrease the block counts.  This
	 * allows a maximum size of roughly 64PiB (2^32-1 blocks of 16MiB
	 * each) to be reported.  The file counts must be artificially
	 * capped at 2^32-1.
	 */
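	/*
	 * For example (illustrative figures only): a 512TiB filesystem
	 * reported with a 128KiB f_bsize needs 2^32 blocks, which does
	 * not fit in a 32-bit f_blocks.  One pass of the loop below
	 * doubles f_bsize to 256KiB and halves the counts to 2^31
	 * blocks, which fits.
	 */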
	if (unlikely(zpl_is_32bit_api())) {
		while (statp->f_blocks > UINT32_MAX &&
		    statp->f_bsize < SPA_MAXBLOCKSIZE) {
			statp->f_frsize <<= 1;
			statp->f_bsize <<= 1;

			statp->f_blocks >>= 1;
			statp->f_bfree >>= 1;
			statp->f_bavail >>= 1;
		}

		uint64_t usedobjs = statp->f_files - statp->f_ffree;
		statp->f_ffree = MIN(statp->f_ffree, UINT32_MAX - usedobjs);
		statp->f_files = statp->f_ffree + usedobjs;
	}

	return (error);
}

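/*
 * ->remount_fs() hook.  Apply updated mount options to an existing
 * mount; the raw option string is passed through to zfs_remount() in a
 * zfs_mnt_t with no dataset name.
 */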
static int
zpl_remount_fs(struct super_block *sb, int *flags, char *data)
{
	zfs_mnt_t zm = { .mnt_osname = NULL, .mnt_data = data };
	fstrans_cookie_t cookie;
	int error;

	cookie = spl_fstrans_mark();
	error = -zfs_remount(sb, flags, &zm);
	spl_fstrans_unmark(cookie);
	ASSERT3S(error, <=, 0);

	return (error);
}

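/*
 * Emit the name of the dataset backing this mount, escaped as needed,
 * for the device column of /proc/self/mounts (->show_devname()).
 */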
static int
__zpl_show_devname(struct seq_file *seq, zfsvfs_t *zfsvfs)
{
	int error;
	if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
		return (error);

	char *fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dmu_objset_name(zfsvfs->z_os, fsname);

	for (int i = 0; fsname[i] != 0; i++) {
		/*
		 * Spaces in the dataset name must be converted to their
		 * octal escape sequence for getmntent(3) to correctly
		 * parse the fsname portion of /proc/self/mounts.
		 */
		if (fsname[i] == ' ') {
			seq_puts(seq, "\\040");
		} else {
			seq_putc(seq, fsname[i]);
		}
	}

	kmem_free(fsname, ZFS_MAX_DATASET_NAME_LEN);

	zpl_exit(zfsvfs, FTAG);

	return (0);
}

static int
zpl_show_devname(struct seq_file *seq, struct dentry *root)
{
	return (__zpl_show_devname(seq, root->d_sb->s_fs_info));
}

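/*
 * Emit the ZFS-specific mount options (xattr, ACL type, and case
 * sensitivity) for /proc/self/mounts (->show_options()).
 */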
static int
__zpl_show_options(struct seq_file *seq, zfsvfs_t *zfsvfs)
{
	seq_printf(seq, ",%s",
	    zfsvfs->z_flags & ZSB_XATTR ? "xattr" : "noxattr");

#ifdef CONFIG_FS_POSIX_ACL
	switch (zfsvfs->z_acl_type) {
	case ZFS_ACLTYPE_POSIX:
		seq_puts(seq, ",posixacl");
		break;
	default:
		seq_puts(seq, ",noacl");
		break;
	}
#endif /* CONFIG_FS_POSIX_ACL */

	switch (zfsvfs->z_case) {
	case ZFS_CASE_SENSITIVE:
		seq_puts(seq, ",casesensitive");
		break;
	case ZFS_CASE_INSENSITIVE:
		seq_puts(seq, ",caseinsensitive");
		break;
	default:
		seq_puts(seq, ",casemixed");
		break;
	}

	return (0);
}

static int
zpl_show_options(struct seq_file *seq, struct dentry *root)
{
	return (__zpl_show_options(seq, root->d_sb->s_fs_info));
}

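/*
 * Populate a newly allocated superblock.  Called through sget() from
 * zpl_mount_impl() when no existing superblock matches the dataset.
 */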
static int
zpl_fill_super(struct super_block *sb, void *data, int silent)
{
	zfs_mnt_t *zm = (zfs_mnt_t *)data;
	fstrans_cookie_t cookie;
	int error;

	cookie = spl_fstrans_mark();
	error = -zfs_domount(sb, zm, silent);
	spl_fstrans_unmark(cookie);
	ASSERT3S(error, <=, 0);

	return (error);
}

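/*
 * sget() comparison callback.  An existing superblock matches when it
 * is backed by the same objset as the dataset being mounted.
 */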
static int
zpl_test_super(struct super_block *s, void *data)
{
	zfsvfs_t *zfsvfs = s->s_fs_info;
	objset_t *os = data;

	if (zfsvfs == NULL)
		return (0);

	return (os == zfsvfs->z_os);
}

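/*
 * Find or create the superblock for the requested dataset.  The objset
 * is held (and long held) only for the duration of the sget() call so
 * an existing superblock can be matched by zpl_test_super().
 */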
static struct super_block *
zpl_mount_impl(struct file_system_type *fs_type, int flags, zfs_mnt_t *zm)
{
	struct super_block *s;
	objset_t *os;
	int err;

	err = dmu_objset_hold(zm->mnt_osname, FTAG, &os);
	if (err)
		return (ERR_PTR(-err));

	/*
	 * The dsl pool lock must be released prior to calling sget().
	 * It is possible sget() may block on the lock in grab_super()
	 * while deactivate_super() holds that same lock and waits for
	 * a txg sync.  If the dsl_pool lock is held over sget()
	 * this can prevent the pool sync and cause a deadlock.
	 */
	dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
	dsl_pool_rele(dmu_objset_pool(os), FTAG);

	s = sget(fs_type, zpl_test_super, set_anon_super, flags, os);

	dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
	dsl_dataset_rele(dmu_objset_ds(os), FTAG);

	if (IS_ERR(s))
		return (ERR_CAST(s));

	if (s->s_root == NULL) {
		err = zpl_fill_super(s, zm, flags & SB_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(s);
			return (ERR_PTR(err));
		}
		s->s_flags |= SB_ACTIVE;
	} else if ((flags ^ s->s_flags) & SB_RDONLY) {
		deactivate_locked_super(s);
		return (ERR_PTR(-EBUSY));
	}

	return (s);
}

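/*
 * ->mount() hook.  Package the dataset name and option string into a
 * zfs_mnt_t, obtain the superblock, and return a reference to its root
 * dentry.
 */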
static struct dentry *
zpl_mount(struct file_system_type *fs_type, int flags,
    const char *osname, void *data)
{
	zfs_mnt_t zm = { .mnt_osname = osname, .mnt_data = data };

	struct super_block *sb = zpl_mount_impl(fs_type, flags, &zm);
	if (IS_ERR(sb))
		return (ERR_CAST(sb));

	return (dget(sb->s_root));
}

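/*
 * ->kill_sb() hook.  Give ZFS a chance to run its pre-unmount work
 * (such as tearing down the .zfs control directory) before the generic
 * anonymous superblock teardown.
 */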
static void
zpl_kill_sb(struct super_block *sb)
{
	zfs_preumount(sb);
	kill_anon_super(sb);
}

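/*
 * Best-effort prune of the dentry and inode caches associated with this
 * superblock.  The count of objects actually pruned is discarded, as is
 * any error from zfs_prune().
 */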
void
zpl_prune_sb(int64_t nr_to_scan, void *arg)
{
	struct super_block *sb = (struct super_block *)arg;
	int objects = 0;

	(void) -zfs_prune(sb, nr_to_scan, &objects);
}

const struct super_operations zpl_super_operations = {
	.alloc_inode		= zpl_inode_alloc,
	.destroy_inode		= zpl_inode_destroy,
	.dirty_inode		= zpl_dirty_inode,
	.write_inode		= NULL,
	.evict_inode		= zpl_evict_inode,
	.put_super		= zpl_put_super,
	.sync_fs		= zpl_sync_fs,
	.statfs			= zpl_statfs,
	.remount_fs		= zpl_remount_fs,
	.show_devname		= zpl_show_devname,
	.show_options		= zpl_show_options,
	.show_stats		= NULL,
};

struct file_system_type zpl_fs_type = {
	.owner			= THIS_MODULE,
	.name			= ZFS_DRIVER,
#if defined(HAVE_IDMAP_MNT_API)
	.fs_flags		= FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
#else
	.fs_flags		= FS_USERNS_MOUNT,
#endif
	.mount			= zpl_mount,
	.kill_sb		= zpl_kill_sb,
};