xref: /linux/fs/vboxsf/super.c (revision 18ca45f5ba1e31704bcca038b8b612e9b1f52b4f)
// SPDX-License-Identifier: MIT
/*
 * VirtualBox Guest Shared Folders support: Virtual File System.
 *
 * Module initialization/finalization
 * File system registration/deregistration
 * Superblock reading
 * A few utility functions
 *
 * Copyright (C) 2006-2018 Oracle Corporation
 */

#include <linux/idr.h>
#include <linux/fs_parser.h>
#include <linux/magic.h>
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/statfs.h>
#include <linux/vbox_utils.h>
#include "vfsmod.h"

#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */

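/*
 * First four bytes of the legacy binary mount-data blob that old
 * mount.vboxsf helpers passed in; vboxsf_parse_monolithic() below only
 * uses this signature to detect and reject such data.
 */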
#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')

static int follow_symlinks;
module_param(follow_symlinks, int, 0444);
MODULE_PARM_DESC(follow_symlinks,
		 "Let host resolve symlinks rather than showing them");

static DEFINE_IDA(vboxsf_bdi_ida);
static DEFINE_MUTEX(vboxsf_setup_mutex);
static bool vboxsf_setup_done;
static struct super_operations vboxsf_super_ops; /* forward declaration */
static struct kmem_cache *vboxsf_inode_cachep;

static char * const vboxsf_default_nls = CONFIG_NLS_DEFAULT;

enum  { opt_nls, opt_uid, opt_gid, opt_ttl, opt_dmode, opt_fmode,
	opt_dmask, opt_fmask };

static const struct fs_parameter_spec vboxsf_fs_parameters[] = {
	fsparam_string	("nls",		opt_nls),
	fsparam_u32	("uid",		opt_uid),
	fsparam_u32	("gid",		opt_gid),
	fsparam_u32	("ttl",		opt_ttl),
	fsparam_u32oct	("dmode",	opt_dmode),
	fsparam_u32oct	("fmode",	opt_fmode),
	fsparam_u32oct	("dmask",	opt_dmask),
	fsparam_u32oct	("fmask",	opt_fmask),
	{}
};

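/*
 * Parse one mount option. Note that "nls" may only be given on the
 * initial mount; the remaining options can also be changed on remount
 * and are applied by vboxsf_reconfigure().
 */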
static int vboxsf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct vboxsf_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	kuid_t uid;
	kgid_t gid;
	int opt;

	opt = fs_parse(fc, vboxsf_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case opt_nls:
		if (ctx->nls_name || fc->purpose != FS_CONTEXT_FOR_MOUNT) {
			vbg_err("vboxsf: Cannot reconfigure nls option\n");
			return -EINVAL;
		}
		ctx->nls_name = param->string;
		param->string = NULL;
		break;
	case opt_uid:
		uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(uid))
			return -EINVAL;
		ctx->o.uid = uid;
		break;
	case opt_gid:
		gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(gid))
			return -EINVAL;
		ctx->o.gid = gid;
		break;
	case opt_ttl:
		ctx->o.ttl = msecs_to_jiffies(result.uint_32);
		break;
	case opt_dmode:
		if (result.uint_32 & ~0777)
			return -EINVAL;
		ctx->o.dmode = result.uint_32;
		ctx->o.dmode_set = true;
		break;
	case opt_fmode:
		if (result.uint_32 & ~0777)
			return -EINVAL;
		ctx->o.fmode = result.uint_32;
		ctx->o.fmode_set = true;
		break;
	case opt_dmask:
		if (result.uint_32 & ~07777)
			return -EINVAL;
		ctx->o.dmask = result.uint_32;
		break;
	case opt_fmask:
		if (result.uint_32 & ~07777)
			return -EINVAL;
		ctx->o.fmask = result.uint_32;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

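/*
 * Fill a new superblock: map the shared folder named by fc->source on the
 * host, stat its root directory and use that info to set up the root inode
 * and dentry.
 */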
static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct vboxsf_fs_context *ctx = fc->fs_private;
	struct shfl_string *folder_name, root_path;
	struct vboxsf_sbi *sbi;
	struct dentry *droot;
	struct inode *iroot;
	char *nls_name;
	size_t size;
	int err;

	if (!fc->source)
		return -EINVAL;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->o = ctx->o;
	idr_init(&sbi->ino_idr);
	spin_lock_init(&sbi->ino_idr_lock);
	sbi->next_generation = 1;
	sbi->bdi_id = -1;

	/* Load nls if not utf8 */
	nls_name = ctx->nls_name ? ctx->nls_name : vboxsf_default_nls;
	if (strcmp(nls_name, "utf8") != 0) {
		if (nls_name == vboxsf_default_nls)
			sbi->nls = load_nls_default();
		else
			sbi->nls = load_nls(nls_name);

		if (!sbi->nls) {
			vbg_err("vboxsf: Could not load '%s' nls\n", nls_name);
			err = -EINVAL;
			goto fail_free;
		}
	}

	sbi->bdi_id = ida_simple_get(&vboxsf_bdi_ida, 0, 0, GFP_KERNEL);
	if (sbi->bdi_id < 0) {
		err = sbi->bdi_id;
		goto fail_free;
	}

	err = super_setup_bdi_name(sb, "vboxsf-%d", sbi->bdi_id);
	if (err)
		goto fail_free;
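	/*
	 * Disable readahead on the per-mount bdi; reads go straight to the
	 * host, which presumably does its own caching.
	 */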
	sb->s_bdi->ra_pages = 0;
	sb->s_bdi->io_pages = 0;

	/* Turn source into a shfl_string and map the folder */
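	/* shfl_string::size counts the terminating NUL, ::length does not */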
	size = strlen(fc->source) + 1;
	folder_name = kmalloc(SHFLSTRING_HEADER_SIZE + size, GFP_KERNEL);
	if (!folder_name) {
		err = -ENOMEM;
		goto fail_free;
	}
	folder_name->size = size;
	folder_name->length = size - 1;
	strlcpy(folder_name->string.utf8, fc->source, size);
	err = vboxsf_map_folder(folder_name, &sbi->root);
	kfree(folder_name);
	if (err) {
		vbg_err("vboxsf: Host rejected mount of '%s' with error %d\n",
			fc->source, err);
		goto fail_free;
	}

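	/* Stat the root of the mapped folder; this info seeds the root inode */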
	root_path.length = 1;
	root_path.size = 2;
	root_path.string.utf8[0] = '/';
	root_path.string.utf8[1] = 0;
	err = vboxsf_stat(sbi, &root_path, &sbi->root_info);
	if (err)
		goto fail_unmap;

	sb->s_magic = VBOXSF_SUPER_MAGIC;
	sb->s_blocksize = 1024;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_op = &vboxsf_super_ops;
	sb->s_d_op = &vboxsf_dentry_ops;

	iroot = iget_locked(sb, 0);
	if (!iroot) {
		err = -ENOMEM;
		goto fail_unmap;
	}
	vboxsf_init_inode(sbi, iroot, &sbi->root_info, false);
	unlock_new_inode(iroot);

	droot = d_make_root(iroot);
	if (!droot) {
		err = -ENOMEM;
		goto fail_unmap;
	}

	sb->s_root = droot;
	sb->s_fs_info = sbi;
	return 0;

fail_unmap:
	vboxsf_unmap_folder(sbi->root);
fail_free:
	if (sbi->bdi_id >= 0)
		ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
	if (sbi->nls)
		unload_nls(sbi->nls);
	idr_destroy(&sbi->ino_idr);
	kfree(sbi);
	return err;
}

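/*
 * Slab constructor: runs once when a slab object is first created, not on
 * every allocation; the per-allocation fields are (re)initialized in
 * vboxsf_alloc_inode().
 */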
static void vboxsf_inode_init_once(void *data)
{
	struct vboxsf_inode *sf_i = data;

	mutex_init(&sf_i->handle_list_mutex);
	inode_init_once(&sf_i->vfs_inode);
}

static struct inode *vboxsf_alloc_inode(struct super_block *sb)
{
	struct vboxsf_inode *sf_i;

	sf_i = kmem_cache_alloc(vboxsf_inode_cachep, GFP_NOFS);
	if (!sf_i)
		return NULL;

	sf_i->force_restat = 0;
	INIT_LIST_HEAD(&sf_i->handle_list);

	return &sf_i->vfs_inode;
}

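/*
 * ->free_inode runs from an RCU callback, hence the irq-safe locking when
 * releasing the inode number back to the ino_idr.
 */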
static void vboxsf_free_inode(struct inode *inode)
{
	struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
	unsigned long flags;

	spin_lock_irqsave(&sbi->ino_idr_lock, flags);
	idr_remove(&sbi->ino_idr, inode->i_ino);
	spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
	kmem_cache_free(vboxsf_inode_cachep, VBOXSF_I(inode));
}

static void vboxsf_put_super(struct super_block *sb)
{
	struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);

	vboxsf_unmap_folder(sbi->root);
	if (sbi->bdi_id >= 0)
		ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
	if (sbi->nls)
		unload_nls(sbi->nls);

	/*
	 * vboxsf_free_inode uses the idr, make sure all delayed rcu free
	 * inodes are flushed.
	 */
	rcu_barrier();
	idr_destroy(&sbi->ino_idr);
	kfree(sbi);
}

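/*
 * Translate the host's volume info into statfs terms; the host reports byte
 * counts, which are converted into f_bsize-sized blocks with do_div().
 */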
static int vboxsf_statfs(struct dentry *dentry, struct kstatfs *stat)
{
	struct super_block *sb = dentry->d_sb;
	struct shfl_volinfo shfl_volinfo;
	struct vboxsf_sbi *sbi;
	u32 buf_len;
	int err;

	sbi = VBOXSF_SBI(sb);
	buf_len = sizeof(shfl_volinfo);
	err = vboxsf_fsinfo(sbi->root, 0, SHFL_INFO_GET | SHFL_INFO_VOLUME,
			    &buf_len, &shfl_volinfo);
	if (err)
		return err;

	stat->f_type = VBOXSF_SUPER_MAGIC;
	stat->f_bsize = shfl_volinfo.bytes_per_allocation_unit;

	do_div(shfl_volinfo.total_allocation_bytes,
	       shfl_volinfo.bytes_per_allocation_unit);
	stat->f_blocks = shfl_volinfo.total_allocation_bytes;

	do_div(shfl_volinfo.available_allocation_bytes,
	       shfl_volinfo.bytes_per_allocation_unit);
	stat->f_bfree  = shfl_volinfo.available_allocation_bytes;
	stat->f_bavail = shfl_volinfo.available_allocation_bytes;

	stat->f_files = 1000;
	/*
	 * Don't return 0 here since the guest may then think that it is not
	 * possible to create any more files.
	 */
	stat->f_ffree = 1000000;
	stat->f_fsid.val[0] = 0;
	stat->f_fsid.val[1] = 0;
	stat->f_namelen = 255;
	return 0;
}

static struct super_operations vboxsf_super_ops = {
	.alloc_inode	= vboxsf_alloc_inode,
	.free_inode	= vboxsf_free_inode,
	.put_super	= vboxsf_put_super,
	.statfs		= vboxsf_statfs,
};

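/*
 * One-time setup shared by all mounts: create the inode cache and connect to
 * the host shared-folders service. Doing this on first mount rather than at
 * module load presumably keeps module loading from failing on systems that
 * are not VirtualBox guests.
 */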
static int vboxsf_setup(void)
{
	int err;

	mutex_lock(&vboxsf_setup_mutex);

	if (vboxsf_setup_done)
		goto success;

	vboxsf_inode_cachep =
		kmem_cache_create("vboxsf_inode_cache",
				  sizeof(struct vboxsf_inode), 0,
				  (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
				   SLAB_ACCOUNT),
				  vboxsf_inode_init_once);
	if (!vboxsf_inode_cachep) {
		err = -ENOMEM;
		goto fail_nomem;
	}

	err = vboxsf_connect();
	if (err) {
		vbg_err("vboxsf: err %d connecting to guest PCI-device\n", err);
		vbg_err("vboxsf: make sure you are inside a VirtualBox VM\n");
		vbg_err("vboxsf: and check dmesg for vboxguest errors\n");
		goto fail_free_cache;
	}

	err = vboxsf_set_utf8();
	if (err) {
		vbg_err("vboxsf: vboxsf_set_utf8 error %d\n", err);
		goto fail_disconnect;
	}

	if (!follow_symlinks) {
		err = vboxsf_set_symlinks();
		if (err)
			vbg_warn("vboxsf: Unable to show symlinks: %d\n", err);
	}

	vboxsf_setup_done = true;
success:
	mutex_unlock(&vboxsf_setup_mutex);
	return 0;

fail_disconnect:
	vboxsf_disconnect();
fail_free_cache:
	kmem_cache_destroy(vboxsf_inode_cachep);
fail_nomem:
	mutex_unlock(&vboxsf_setup_mutex);
	return err;
}

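/*
 * Old mount.vboxsf helpers passed a binary blob (identified by the signature
 * bytes defined above) instead of an option string; detect and reject it
 * before handing the data to the generic option parser.
 */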
static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
{
	unsigned char *options = data;

	if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
		       options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
		       options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
		       options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
		vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
		return -EINVAL;
	}

	return generic_parse_monolithic(fc, data);
}

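/*
 * Connection setup is deferred until the first mount; each mount then gets
 * its own anonymous superblock via get_tree_nodev().
 */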
static int vboxsf_get_tree(struct fs_context *fc)
{
	int err;

	err = vboxsf_setup();
	if (err)
		return err;

	return get_tree_nodev(fc, vboxsf_fill_super);
}

static int vboxsf_reconfigure(struct fs_context *fc)
{
	struct vboxsf_sbi *sbi = VBOXSF_SBI(fc->root->d_sb);
	struct vboxsf_fs_context *ctx = fc->fs_private;
	struct inode *iroot = fc->root->d_sb->s_root->d_inode;

	/* Apply changed options to the root inode */
	sbi->o = ctx->o;
	vboxsf_init_inode(sbi, iroot, &sbi->root_info, true);

	return 0;
}

static void vboxsf_free_fc(struct fs_context *fc)
{
	struct vboxsf_fs_context *ctx = fc->fs_private;

	kfree(ctx->nls_name);
	kfree(ctx);
}

static const struct fs_context_operations vboxsf_context_ops = {
	.free			= vboxsf_free_fc,
	.parse_param		= vboxsf_parse_param,
	.parse_monolithic	= vboxsf_parse_monolithic,
	.get_tree		= vboxsf_get_tree,
	.reconfigure		= vboxsf_reconfigure,
};

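/*
 * Allocate the per-fs_context private data holding the parsed mount options;
 * uid/gid default to the mounting process's credentials.
 */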
static int vboxsf_init_fs_context(struct fs_context *fc)
{
	struct vboxsf_fs_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	current_uid_gid(&ctx->o.uid, &ctx->o.gid);

	fc->fs_private = ctx;
	fc->ops = &vboxsf_context_ops;
	return 0;
}

static struct file_system_type vboxsf_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "vboxsf",
	.init_fs_context	= vboxsf_init_fs_context,
	.kill_sb		= kill_anon_super
};

/* Module initialization/finalization handlers */
static int __init vboxsf_init(void)
{
	return register_filesystem(&vboxsf_fs_type);
}

static void __exit vboxsf_fini(void)
{
	unregister_filesystem(&vboxsf_fs_type);

	mutex_lock(&vboxsf_setup_mutex);
	if (vboxsf_setup_done) {
		vboxsf_disconnect();
		/*
		 * Make sure all delayed rcu free inodes are flushed
		 * before we destroy the cache.
		 */
		rcu_barrier();
		kmem_cache_destroy(vboxsf_inode_cachep);
	}
	mutex_unlock(&vboxsf_setup_mutex);
}

module_init(vboxsf_init);
module_exit(vboxsf_fini);

MODULE_DESCRIPTION("Oracle VM VirtualBox Module for Host File System Access");
MODULE_AUTHOR("Oracle Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_FS("vboxsf");