// SPDX-License-Identifier: MIT
/*
 * VirtualBox Guest Shared Folders support: Virtual File System.
 *
 * Module initialization/finalization
 * File system registration/deregistration
 * Superblock reading
 * Few utility functions
 *
 * Copyright (C) 2006-2018 Oracle Corporation
 */

#include <linux/idr.h>
#include <linux/fs_parser.h>
#include <linux/magic.h>
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/statfs.h>
#include <linux/vbox_utils.h>
#include "vfsmod.h"

#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */

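/*
 * Magic prefix of the old binary mount-data structure passed in by obsolete
 * mount.vboxsf binaries; vboxsf_parse_monolithic() below uses it to detect
 * and reject that format.
 */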
static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";

static int follow_symlinks;
module_param(follow_symlinks, int, 0444);
MODULE_PARM_DESC(follow_symlinks,
		 "Let host resolve symlinks rather than showing them");

static DEFINE_IDA(vboxsf_bdi_ida);
static DEFINE_MUTEX(vboxsf_setup_mutex);
static bool vboxsf_setup_done;
static struct super_operations vboxsf_super_ops; /* forward declaration */
static struct kmem_cache *vboxsf_inode_cachep;

static char * const vboxsf_default_nls = CONFIG_NLS_DEFAULT;

enum  { opt_nls, opt_uid, opt_gid, opt_ttl, opt_dmode, opt_fmode,
	opt_dmask, opt_fmask };

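/* Mount options, handled through the new mount API (fs_context/fs_parameter) */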
static const struct fs_parameter_spec vboxsf_fs_parameters[] = {
	fsparam_string	("nls",		opt_nls),
	fsparam_uid	("uid",		opt_uid),
	fsparam_gid	("gid",		opt_gid),
	fsparam_u32	("ttl",		opt_ttl),
	fsparam_u32oct	("dmode",	opt_dmode),
	fsparam_u32oct	("fmode",	opt_fmode),
	fsparam_u32oct	("dmask",	opt_dmask),
	fsparam_u32oct	("fmask",	opt_fmask),
	{}
};
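
/*
 * Example (share name and mount point are illustrative only):
 *
 *   mount -t vboxsf -o uid=1000,gid=1000,dmode=0755,fmode=0644 myshare /mnt/shared
 */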

static int vboxsf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct vboxsf_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, vboxsf_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case opt_nls:
		if (ctx->nls_name || fc->purpose != FS_CONTEXT_FOR_MOUNT) {
			vbg_err("vboxsf: Cannot reconfigure nls option\n");
			return -EINVAL;
		}
		ctx->nls_name = param->string;
		param->string = NULL;
		break;
	case opt_uid:
		ctx->o.uid = result.uid;
		break;
	case opt_gid:
		ctx->o.gid = result.gid;
		break;
	case opt_ttl:
		ctx->o.ttl = msecs_to_jiffies(result.uint_32);
		break;
	case opt_dmode:
		if (result.uint_32 & ~0777)
			return -EINVAL;
		ctx->o.dmode = result.uint_32;
		ctx->o.dmode_set = true;
		break;
	case opt_fmode:
		if (result.uint_32 & ~0777)
			return -EINVAL;
		ctx->o.fmode = result.uint_32;
		ctx->o.fmode_set = true;
		break;
	case opt_dmask:
		if (result.uint_32 & ~07777)
			return -EINVAL;
		ctx->o.dmask = result.uint_32;
		break;
	case opt_fmask:
		if (result.uint_32 & ~07777)
			return -EINVAL;
		ctx->o.fmask = result.uint_32;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

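/*
 * Fill the superblock at mount time: allocate the per-sb info, load the
 * requested nls table, map the shared folder named by fc->source on the
 * host and instantiate the root inode and dentry.
 */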
static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct vboxsf_fs_context *ctx = fc->fs_private;
	struct shfl_string *folder_name, root_path;
	struct vboxsf_sbi *sbi;
	struct dentry *droot;
	struct inode *iroot;
	char *nls_name;
	size_t size;
	int err;

	if (!fc->source)
		return -EINVAL;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->o = ctx->o;
	idr_init(&sbi->ino_idr);
	spin_lock_init(&sbi->ino_idr_lock);
	sbi->next_generation = 1;
	sbi->bdi_id = -1;

	/* Load nls if not utf8 */
	nls_name = ctx->nls_name ? ctx->nls_name : vboxsf_default_nls;
	if (strcmp(nls_name, "utf8") != 0) {
		if (nls_name == vboxsf_default_nls)
			sbi->nls = load_nls_default();
		else
			sbi->nls = load_nls(nls_name);

		if (!sbi->nls) {
			vbg_err("vboxsf: Could not load '%s' nls\n", nls_name);
			err = -EINVAL;
			goto fail_destroy_idr;
		}
	}

	sbi->bdi_id = ida_alloc(&vboxsf_bdi_ida, GFP_KERNEL);
	if (sbi->bdi_id < 0) {
		err = sbi->bdi_id;
		goto fail_free;
	}

	err = super_setup_bdi_name(sb, "vboxsf-%d", sbi->bdi_id);
	if (err)
		goto fail_free;
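	/* A zero ra_pages/io_pages disables readahead on this bdi */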
	sb->s_bdi->ra_pages = 0;
	sb->s_bdi->io_pages = 0;

	/* Turn source into a shfl_string and map the folder */
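	/* For an shfl_string, size includes the terminating NUL, length does not */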
	size = strlen(fc->source) + 1;
	folder_name = kmalloc(SHFLSTRING_HEADER_SIZE + size, GFP_KERNEL);
	if (!folder_name) {
		err = -ENOMEM;
		goto fail_free;
	}
	folder_name->size = size;
	folder_name->length = size - 1;
	strscpy(folder_name->string.utf8, fc->source, size);
	err = vboxsf_map_folder(folder_name, &sbi->root);
	kfree(folder_name);
	if (err) {
		vbg_err("vboxsf: Host rejected mount of '%s' with error %d\n",
			fc->source, err);
		goto fail_free;
	}

	root_path.length = 1;
	root_path.size = 2;
	root_path.string.utf8[0] = '/';
	root_path.string.utf8[1] = 0;
	err = vboxsf_stat(sbi, &root_path, &sbi->root_info);
	if (err)
		goto fail_unmap;

	sb->s_magic = VBOXSF_SUPER_MAGIC;
	sb->s_blocksize = 1024;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_op = &vboxsf_super_ops;
	sb->s_d_op = &vboxsf_dentry_ops;

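	/*
	 * The root inode uses the fixed inode number 0; its attributes come
	 * from the stat info for "/" retrieved above.
	 */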
	iroot = iget_locked(sb, 0);
	if (!iroot) {
		err = -ENOMEM;
		goto fail_unmap;
	}
	vboxsf_init_inode(sbi, iroot, &sbi->root_info, false);
	unlock_new_inode(iroot);

	droot = d_make_root(iroot);
	if (!droot) {
		err = -ENOMEM;
		goto fail_unmap;
	}

	sb->s_root = droot;
	sb->s_fs_info = sbi;
	return 0;

fail_unmap:
	vboxsf_unmap_folder(sbi->root);
fail_free:
	if (sbi->bdi_id >= 0)
		ida_free(&vboxsf_bdi_ida, sbi->bdi_id);
	if (sbi->nls)
		unload_nls(sbi->nls);
fail_destroy_idr:
	idr_destroy(&sbi->ino_idr);
	kfree(sbi);
	return err;
}

static void vboxsf_inode_init_once(void *data)
{
	struct vboxsf_inode *sf_i = data;

	mutex_init(&sf_i->handle_list_mutex);
	inode_init_once(&sf_i->vfs_inode);
}

static struct inode *vboxsf_alloc_inode(struct super_block *sb)
{
	struct vboxsf_inode *sf_i;

	sf_i = alloc_inode_sb(sb, vboxsf_inode_cachep, GFP_NOFS);
	if (!sf_i)
		return NULL;

	sf_i->force_restat = 0;
	INIT_LIST_HEAD(&sf_i->handle_list);

	return &sf_i->vfs_inode;
}

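/*
 * ->free_inode is called from an RCU callback, which is why the idr lock
 * is taken irqsave here when giving the inode number back.
 */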
static void vboxsf_free_inode(struct inode *inode)
{
	struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
	unsigned long flags;

	spin_lock_irqsave(&sbi->ino_idr_lock, flags);
	idr_remove(&sbi->ino_idr, inode->i_ino);
	spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
	kmem_cache_free(vboxsf_inode_cachep, VBOXSF_I(inode));
}

static void vboxsf_put_super(struct super_block *sb)
{
	struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);

	vboxsf_unmap_folder(sbi->root);
	if (sbi->bdi_id >= 0)
		ida_free(&vboxsf_bdi_ida, sbi->bdi_id);
	if (sbi->nls)
		unload_nls(sbi->nls);

	/*
	 * vboxsf_free_inode uses the idr, make sure all delayed rcu free
	 * inodes are flushed.
	 */
	rcu_barrier();
	idr_destroy(&sbi->ino_idr);
	kfree(sbi);
}

static int vboxsf_statfs(struct dentry *dentry, struct kstatfs *stat)
{
	struct super_block *sb = dentry->d_sb;
	struct shfl_volinfo shfl_volinfo;
	struct vboxsf_sbi *sbi;
	u32 buf_len;
	int err;

	sbi = VBOXSF_SBI(sb);
	buf_len = sizeof(shfl_volinfo);
	err = vboxsf_fsinfo(sbi->root, 0, SHFL_INFO_GET | SHFL_INFO_VOLUME,
			    &buf_len, &shfl_volinfo);
	if (err)
		return err;

	stat->f_type = VBOXSF_SUPER_MAGIC;
	stat->f_bsize = shfl_volinfo.bytes_per_allocation_unit;

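	/*
	 * The host reports sizes in bytes; convert them to f_bsize-sized
	 * allocation units (do_div() divides in place).
	 */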
	do_div(shfl_volinfo.total_allocation_bytes,
	       shfl_volinfo.bytes_per_allocation_unit);
	stat->f_blocks = shfl_volinfo.total_allocation_bytes;

	do_div(shfl_volinfo.available_allocation_bytes,
	       shfl_volinfo.bytes_per_allocation_unit);
	stat->f_bfree  = shfl_volinfo.available_allocation_bytes;
	stat->f_bavail = shfl_volinfo.available_allocation_bytes;

	stat->f_files = 1000;
	/*
	 * Don't return 0 here since the guest may then think that it is not
	 * possible to create any more files.
	 */
	stat->f_ffree = 1000000;
	stat->f_fsid.val[0] = 0;
	stat->f_fsid.val[1] = 0;
	stat->f_namelen = 255;
	return 0;
}

static struct super_operations vboxsf_super_ops = {
	.alloc_inode	= vboxsf_alloc_inode,
	.free_inode	= vboxsf_free_inode,
	.put_super	= vboxsf_put_super,
	.statfs		= vboxsf_statfs,
};

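/*
 * One-time setup of the inode cache and of the connection to the host,
 * done lazily on first mount (serialized by vboxsf_setup_mutex) rather
 * than at module load.
 */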
static int vboxsf_setup(void)
{
	int err;

	mutex_lock(&vboxsf_setup_mutex);

	if (vboxsf_setup_done)
		goto success;

	vboxsf_inode_cachep =
		kmem_cache_create("vboxsf_inode_cache",
				  sizeof(struct vboxsf_inode), 0,
				  SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
				  vboxsf_inode_init_once);
	if (!vboxsf_inode_cachep) {
		err = -ENOMEM;
		goto fail_nomem;
	}

	err = vboxsf_connect();
	if (err) {
		vbg_err("vboxsf: err %d connecting to guest PCI-device\n", err);
		vbg_err("vboxsf: make sure you are inside a VirtualBox VM\n");
		vbg_err("vboxsf: and check dmesg for vboxguest errors\n");
		goto fail_free_cache;
	}

	err = vboxsf_set_utf8();
	if (err) {
		vbg_err("vboxsf_setutf8 error %d\n", err);
		goto fail_disconnect;
	}

	if (!follow_symlinks) {
		err = vboxsf_set_symlinks();
		if (err)
			vbg_warn("vboxsf: Unable to show symlinks: %d\n", err);
	}

	vboxsf_setup_done = true;
success:
	mutex_unlock(&vboxsf_setup_mutex);
	return 0;

fail_disconnect:
	vboxsf_disconnect();
fail_free_cache:
	kmem_cache_destroy(vboxsf_inode_cachep);
fail_nomem:
	mutex_unlock(&vboxsf_setup_mutex);
	return err;
}

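/*
 * Reject the old binary mount-data format (recognized by
 * VBSF_MOUNT_SIGNATURE) from obsolete mount.vboxsf binaries; anything else
 * is parsed as a normal comma-separated option string.
 */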
static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
{
	if (data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4)) {
		vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
		return -EINVAL;
	}

	return generic_parse_monolithic(fc, data);
}

static int vboxsf_get_tree(struct fs_context *fc)
{
	int err;

	err = vboxsf_setup();
	if (err)
		return err;

	return get_tree_nodev(fc, vboxsf_fill_super);
}

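/* Remount: apply the re-parsed options and refresh the root inode */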
static int vboxsf_reconfigure(struct fs_context *fc)
{
	struct vboxsf_sbi *sbi = VBOXSF_SBI(fc->root->d_sb);
	struct vboxsf_fs_context *ctx = fc->fs_private;
	struct inode *iroot = fc->root->d_sb->s_root->d_inode;

	/* Apply changed options to the root inode */
	sbi->o = ctx->o;
	vboxsf_init_inode(sbi, iroot, &sbi->root_info, true);

	return 0;
}

static void vboxsf_free_fc(struct fs_context *fc)
{
	struct vboxsf_fs_context *ctx = fc->fs_private;

	kfree(ctx->nls_name);
	kfree(ctx);
}

static const struct fs_context_operations vboxsf_context_ops = {
	.free			= vboxsf_free_fc,
	.parse_param		= vboxsf_parse_param,
	.parse_monolithic	= vboxsf_parse_monolithic,
	.get_tree		= vboxsf_get_tree,
	.reconfigure		= vboxsf_reconfigure,
};

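/*
 * Set up the per-mount fs_context; uid and gid default to the credentials
 * of the mounting process.
 */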
static int vboxsf_init_fs_context(struct fs_context *fc)
{
	struct vboxsf_fs_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	current_uid_gid(&ctx->o.uid, &ctx->o.gid);

	fc->fs_private = ctx;
	fc->ops = &vboxsf_context_ops;
	return 0;
}

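/*
 * ->get_tree uses get_tree_nodev(), so the generic kill_anon_super() is
 * enough for ->kill_sb; per-superblock cleanup happens in vboxsf_put_super().
 */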
static struct file_system_type vboxsf_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "vboxsf",
	.init_fs_context	= vboxsf_init_fs_context,
	.kill_sb		= kill_anon_super
};

/* Module initialization/finalization handlers */
static int __init vboxsf_init(void)
{
	return register_filesystem(&vboxsf_fs_type);
}

static void __exit vboxsf_fini(void)
{
	unregister_filesystem(&vboxsf_fs_type);

	mutex_lock(&vboxsf_setup_mutex);
	if (vboxsf_setup_done) {
		vboxsf_disconnect();
		/*
		 * Make sure all delayed rcu free inodes are flushed
		 * before we destroy the cache.
		 */
		rcu_barrier();
		kmem_cache_destroy(vboxsf_inode_cachep);
	}
	mutex_unlock(&vboxsf_setup_mutex);
}

module_init(vboxsf_init);
module_exit(vboxsf_fini);

MODULE_DESCRIPTION("Oracle VM VirtualBox Module for Host File System Access");
MODULE_AUTHOR("Oracle Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_FS("vboxsf");