// SPDX-License-Identifier: GPL-2.0-only

#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

static DEFINE_SPINLOCK(ceph_fsc_lock);
static LIST_HEAD(ceph_fsc_list);

/*
 * Ceph superblock operations
 *
 * Handle the basics of mounting, unmounting.
 */

/*
 * super ops
 */
static void ceph_put_super(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);

	dout("put_super\n");
	ceph_mdsc_close_sessions(fsc->mdsc);
}

static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
	struct ceph_mon_client *monc = &fsc->client->monc;
	struct ceph_statfs st;
	u64 fsid;
	int err;
	u64 data_pool;

	if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
		data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
	} else {
		data_pool = CEPH_NOPOOL;
	}

	dout("statfs\n");
	err = ceph_monc_do_statfs(monc, data_pool, &st);
	if (err < 0)
		return err;

	/* fill in kstatfs */
	buf->f_type = CEPH_SUPER_MAGIC;  /* ?? */

	/*
	 * express utilization in terms of large blocks to avoid
	 * overflow on 32-bit machines.
	 *
	 * NOTE: for the time being, we make bsize == frsize to humor
	 * not-yet-ancient versions of glibc that are broken.
	 * Someday, we will probably want to report a real block
	 * size...  whatever that may mean for a network file system!
	 */
	buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
	buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;

	/*
	 * By default use root quota for stats; fallback to overall filesystem
	 * usage if using 'noquotadf' mount option or if the root dir doesn't
	 * have max_bytes quota set.
	 */
	if (ceph_test_mount_opt(fsc, NOQUOTADF) ||
	    !ceph_quota_update_statfs(fsc, buf)) {
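		/*
		 * st.kb is reported in KiB, so shifting right by
		 * (CEPH_BLOCK_SHIFT - 10) converts it into CEPH_BLOCK-sized
		 * units; with the usual 4 MB CEPH_BLOCK that is kb / 4096.
		 */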
		buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
		buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
		buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
	}

	buf->f_files = le64_to_cpu(st.num_objects);
	buf->f_ffree = -1;
	buf->f_namelen = NAME_MAX;

	/* Must convert the fsid, for consistent values across arches */
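	/* (fold the 128-bit cluster fsid to 64 bits by XORing its two halves) */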
	mutex_lock(&monc->mutex);
	fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
	       le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
	mutex_unlock(&monc->mutex);

	buf->f_fsid = u64_to_fsid(fsid);

	return 0;
}

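/*
 * wait == 0 is the quick pass (e.g. the first phase of sys_sync()): just
 * start writeback of dirty caps.  wait != 0 must not return until both
 * OSD data and MDS metadata have been flushed to the servers.
 */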
static int ceph_sync_fs(struct super_block *sb, int wait)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

	if (!wait) {
		dout("sync_fs (non-blocking)\n");
		ceph_flush_dirty_caps(fsc->mdsc);
		dout("sync_fs (non-blocking) done\n");
		return 0;
	}

	dout("sync_fs (blocking)\n");
	ceph_osdc_sync(&fsc->client->osdc);
	ceph_mdsc_sync(fsc->mdsc);
	dout("sync_fs (blocking) done\n");
	return 0;
}

/*
 * mount options
 */
enum {
	Opt_wsize,
	Opt_rsize,
	Opt_rasize,
	Opt_caps_wanted_delay_min,
	Opt_caps_wanted_delay_max,
	Opt_caps_max,
	Opt_readdir_max_entries,
	Opt_readdir_max_bytes,
	Opt_congestion_kb,
	/* int args above */
	Opt_snapdirname,
	Opt_mds_namespace,
	Opt_recover_session,
	Opt_source,
	/* string args above */
	Opt_dirstat,
	Opt_rbytes,
	Opt_asyncreaddir,
	Opt_dcache,
	Opt_ino32,
	Opt_fscache,
	Opt_poolperm,
	Opt_require_active_mds,
	Opt_acl,
	Opt_quotadf,
	Opt_copyfrom,
	Opt_wsync,
};

enum ceph_recover_session_mode {
	ceph_recover_session_no,
	ceph_recover_session_clean
};

static const struct constant_table ceph_param_recover[] = {
	{ "no",		ceph_recover_session_no },
	{ "clean",	ceph_recover_session_clean },
	{}
};

static const struct fs_parameter_spec ceph_mount_parameters[] = {
	fsparam_flag_no ("acl",				Opt_acl),
	fsparam_flag_no ("asyncreaddir",		Opt_asyncreaddir),
	fsparam_s32	("caps_max",			Opt_caps_max),
	fsparam_u32	("caps_wanted_delay_max",	Opt_caps_wanted_delay_max),
	fsparam_u32	("caps_wanted_delay_min",	Opt_caps_wanted_delay_min),
	fsparam_u32	("write_congestion_kb",		Opt_congestion_kb),
	fsparam_flag_no ("copyfrom",			Opt_copyfrom),
	fsparam_flag_no ("dcache",			Opt_dcache),
	fsparam_flag_no ("dirstat",			Opt_dirstat),
	fsparam_flag_no	("fsc",				Opt_fscache), // fsc|nofsc
	fsparam_string	("fsc",				Opt_fscache), // fsc=...
	fsparam_flag_no ("ino32",			Opt_ino32),
	fsparam_string	("mds_namespace",		Opt_mds_namespace),
	fsparam_flag_no ("poolperm",			Opt_poolperm),
	fsparam_flag_no ("quotadf",			Opt_quotadf),
	fsparam_u32	("rasize",			Opt_rasize),
	fsparam_flag_no ("rbytes",			Opt_rbytes),
	fsparam_u32	("readdir_max_bytes",		Opt_readdir_max_bytes),
	fsparam_u32	("readdir_max_entries",		Opt_readdir_max_entries),
	fsparam_enum	("recover_session",		Opt_recover_session, ceph_param_recover),
	fsparam_flag_no ("require_active_mds",		Opt_require_active_mds),
	fsparam_u32	("rsize",			Opt_rsize),
	fsparam_string	("snapdirname",			Opt_snapdirname),
	fsparam_string	("source",			Opt_source),
	fsparam_u32	("wsize",			Opt_wsize),
	fsparam_flag_no	("wsync",			Opt_wsync),
	{}
};
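/*
 * Example invocation (all values hypothetical):
 *
 *   mount -t ceph 192.168.0.1:6789:/ /mnt/cephfs \
 *         -o name=admin,mds_namespace=myfs,wsize=16777216,noasyncreaddir
 *
 * "name=admin" is a libceph option and is consumed by ceph_parse_param();
 * the remaining keys are matched against the table above.
 */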

struct ceph_parse_opts_ctx {
	struct ceph_options		*copts;
	struct ceph_mount_options	*opts;
};

/*
 * Remove adjacent slashes and then the trailing slash, unless it is
 * the only remaining character.
 *
 * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
 */
static void canonicalize_path(char *path)
{
	int i, j = 0;

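	/* copy path[i] to path[j], skipping any '/' that follows another '/' */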
	for (i = 0; path[i] != '\0'; i++) {
		if (path[i] != '/' || j < 1 || path[j - 1] != '/')
			path[j++] = path[i];
	}

	if (j > 1 && path[j - 1] == '/')
		j--;
	path[j] = '\0';
}

/*
 * Parse the source parameter.  Distinguish the server list from the path.
 *
 * The source will look like:
 *     <server_spec>[,<server_spec>...]:[<path>]
 * where
 *     <server_spec> is <ip>[:<port>]
 *     <path> is optional, but if present must begin with '/'
 */
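/*
 * For example (hypothetical addresses), a source of
 *     "192.168.0.1:6789,192.168.0.2:6789:/dir1/dir2"
 * splits into the monitor list "192.168.0.1:6789,192.168.0.2:6789" and
 * server_path "/dir1/dir2".
 */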
static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
{
	struct ceph_parse_opts_ctx *pctx = fc->fs_private;
	struct ceph_mount_options *fsopt = pctx->opts;
	char *dev_name = param->string, *dev_name_end;
	int ret;

	dout("%s '%s'\n", __func__, dev_name);
	if (!dev_name || !*dev_name)
		return invalfc(fc, "Empty source");

	dev_name_end = strchr(dev_name, '/');
	if (dev_name_end) {
		/*
		 * The server_path keeps the path exactly as userland passed
		 * it in, including the leading '/'.
		 */
		kfree(fsopt->server_path);
		fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
		if (!fsopt->server_path)
			return -ENOMEM;

		canonicalize_path(fsopt->server_path);
	} else {
		dev_name_end = dev_name + strlen(dev_name);
	}

	dev_name_end--;		/* back up to ':' separator */
	if (dev_name_end < dev_name || *dev_name_end != ':')
		return invalfc(fc, "No path or : separator in source");

	dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
	if (fsopt->server_path)
		dout("server path '%s'\n", fsopt->server_path);

	ret = ceph_parse_mon_ips(param->string, dev_name_end - dev_name,
				 pctx->copts, fc->log.log);
	if (ret)
		return ret;

	fc->source = param->string;
	param->string = NULL;
	return 0;
}

static int ceph_parse_mount_param(struct fs_context *fc,
				  struct fs_parameter *param)
{
	struct ceph_parse_opts_ctx *pctx = fc->fs_private;
	struct ceph_mount_options *fsopt = pctx->opts;
	struct fs_parse_result result;
	unsigned int mode;
	int token, ret;

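	/*
	 * Give libceph the first shot at the option: ceph_parse_param()
	 * returns -ENOPARAM for keys it does not recognize, and only those
	 * fall through to the cephfs-specific table below.
	 */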
	ret = ceph_parse_param(param, pctx->copts, fc->log.log);
	if (ret != -ENOPARAM)
		return ret;

	token = fs_parse(fc, ceph_mount_parameters, param, &result);
	dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
	if (token < 0)
		return token;

	switch (token) {
	case Opt_snapdirname:
		kfree(fsopt->snapdir_name);
		fsopt->snapdir_name = param->string;
		param->string = NULL;
		break;
	case Opt_mds_namespace:
		kfree(fsopt->mds_namespace);
		fsopt->mds_namespace = param->string;
		param->string = NULL;
		break;
	case Opt_recover_session:
		mode = result.uint_32;
		if (mode == ceph_recover_session_no)
			fsopt->flags &= ~CEPH_MOUNT_OPT_CLEANRECOVER;
		else if (mode == ceph_recover_session_clean)
			fsopt->flags |= CEPH_MOUNT_OPT_CLEANRECOVER;
		else
			BUG();
		break;
	case Opt_source:
		if (fc->source)
			return invalfc(fc, "Multiple sources specified");
		return ceph_parse_source(param, fc);
	case Opt_wsize:
		if (result.uint_32 < PAGE_SIZE ||
		    result.uint_32 > CEPH_MAX_WRITE_SIZE)
			goto out_of_range;
		fsopt->wsize = ALIGN(result.uint_32, PAGE_SIZE);
		break;
	case Opt_rsize:
		if (result.uint_32 < PAGE_SIZE ||
		    result.uint_32 > CEPH_MAX_READ_SIZE)
			goto out_of_range;
		fsopt->rsize = ALIGN(result.uint_32, PAGE_SIZE);
		break;
	case Opt_rasize:
		fsopt->rasize = ALIGN(result.uint_32, PAGE_SIZE);
		break;
	case Opt_caps_wanted_delay_min:
		if (result.uint_32 < 1)
			goto out_of_range;
		fsopt->caps_wanted_delay_min = result.uint_32;
		break;
	case Opt_caps_wanted_delay_max:
		if (result.uint_32 < 1)
			goto out_of_range;
		fsopt->caps_wanted_delay_max = result.uint_32;
		break;
	case Opt_caps_max:
		if (result.int_32 < 0)
			goto out_of_range;
		fsopt->caps_max = result.int_32;
		break;
	case Opt_readdir_max_entries:
		if (result.uint_32 < 1)
			goto out_of_range;
		fsopt->max_readdir = result.uint_32;
		break;
	case Opt_readdir_max_bytes:
		if (result.uint_32 < PAGE_SIZE && result.uint_32 != 0)
			goto out_of_range;
		fsopt->max_readdir_bytes = result.uint_32;
		break;
	case Opt_congestion_kb:
		if (result.uint_32 < 1024) /* at least 1M */
			goto out_of_range;
		fsopt->congestion_kb = result.uint_32;
		break;
	case Opt_dirstat:
		if (!result.negated)
			fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
		else
			fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_rbytes:
		if (!result.negated)
			fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
		else
			fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_asyncreaddir:
		if (!result.negated)
			fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
		else
			fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_dcache:
		if (!result.negated)
			fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
		else
			fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_ino32:
		if (!result.negated)
			fsopt->flags |= CEPH_MOUNT_OPT_INO32;
		else
			fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
		break;

	case Opt_fscache:
#ifdef CONFIG_CEPH_FSCACHE
		kfree(fsopt->fscache_uniq);
		fsopt->fscache_uniq = NULL;
		if (result.negated) {
			fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
		} else {
			fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
			fsopt->fscache_uniq = param->string;
			param->string = NULL;
		}
		break;
#else
		return invalfc(fc, "fscache support is disabled");
#endif
	case Opt_poolperm:
		if (!result.negated)
			fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
		else
			fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
		break;
	case Opt_require_active_mds:
		if (!result.negated)
			fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
		else
			fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
		break;
	case Opt_quotadf:
		if (!result.negated)
			fsopt->flags &= ~CEPH_MOUNT_OPT_NOQUOTADF;
		else
			fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
		break;
	case Opt_copyfrom:
		if (!result.negated)
			fsopt->flags &= ~CEPH_MOUNT_OPT_NOCOPYFROM;
		else
			fsopt->flags |= CEPH_MOUNT_OPT_NOCOPYFROM;
		break;
	case Opt_acl:
		if (!result.negated) {
#ifdef CONFIG_CEPH_FS_POSIX_ACL
			fc->sb_flags |= SB_POSIXACL;
#else
			return invalfc(fc, "POSIX ACL support is disabled");
#endif
		} else {
			fc->sb_flags &= ~SB_POSIXACL;
		}
		break;
	case Opt_wsync:
		if (!result.negated)
			fsopt->flags &= ~CEPH_MOUNT_OPT_ASYNC_DIROPS;
		else
			fsopt->flags |= CEPH_MOUNT_OPT_ASYNC_DIROPS;
		break;
	default:
		BUG();
	}
	return 0;

out_of_range:
	return invalfc(fc, "%s out of range", param->key);
}

static void destroy_mount_options(struct ceph_mount_options *args)
{
	dout("destroy_mount_options %p\n", args);
	if (!args)
		return;

	kfree(args->snapdir_name);
	kfree(args->mds_namespace);
	kfree(args->server_path);
	kfree(args->fscache_uniq);
	kfree(args);
}

static int strcmp_null(const char *s1, const char *s2)
{
	if (!s1 && !s2)
		return 0;
	if (s1 && !s2)
		return -1;
	if (!s1 && s2)
		return 1;
	return strcmp(s1, s2);
}

static int compare_mount_options(struct ceph_mount_options *new_fsopt,
				 struct ceph_options *new_opt,
				 struct ceph_fs_client *fsc)
{
	struct ceph_mount_options *fsopt1 = new_fsopt;
	struct ceph_mount_options *fsopt2 = fsc->mount_options;
	int ofs = offsetof(struct ceph_mount_options, snapdir_name);
	int ret;

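	/*
	 * All fixed-size options are laid out ahead of snapdir_name in
	 * struct ceph_mount_options, so a single memcmp() of that prefix
	 * compares them in one go; the string options are compared
	 * individually below.
	 */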
	ret = memcmp(fsopt1, fsopt2, ofs);
	if (ret)
		return ret;

	ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
	if (ret)
		return ret;

	ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
	if (ret)
		return ret;

	ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
	if (ret)
		return ret;

	ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
	if (ret)
		return ret;

	return ceph_compare_options(new_opt, fsc->client);
}

/**
 * ceph_show_options - Show mount options in /proc/mounts
 * @m: seq_file to write to
 * @root: root of that (sub)tree
 */
static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
	struct ceph_mount_options *fsopt = fsc->mount_options;
	size_t pos;
	int ret;

	/* a comma between MNT/MS and client options */
	seq_putc(m, ',');
	pos = m->count;

	ret = ceph_print_client_options(m, fsc->client, false);
	if (ret)
		return ret;

	/* retract our comma if no client options */
	if (m->count == pos)
		m->count--;

	if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
		seq_puts(m, ",dirstat");
	if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
		seq_puts(m, ",rbytes");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
		seq_puts(m, ",noasyncreaddir");
	if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
		seq_puts(m, ",nodcache");
	if (fsopt->flags & CEPH_MOUNT_OPT_INO32)
		seq_puts(m, ",ino32");
	if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) {
		seq_show_option(m, "fsc", fsopt->fscache_uniq);
	}
	if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
		seq_puts(m, ",nopoolperm");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOQUOTADF)
		seq_puts(m, ",noquotadf");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(m, ",acl");
	else
		seq_puts(m, ",noacl");
#endif

	if ((fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) == 0)
		seq_puts(m, ",copyfrom");

	if (fsopt->mds_namespace)
		seq_show_option(m, "mds_namespace", fsopt->mds_namespace);

	if (fsopt->flags & CEPH_MOUNT_OPT_CLEANRECOVER)
		seq_show_option(m, "recover_session", "clean");

	if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
		seq_puts(m, ",nowsync");

	if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
		seq_printf(m, ",wsize=%u", fsopt->wsize);
	if (fsopt->rsize != CEPH_MAX_READ_SIZE)
		seq_printf(m, ",rsize=%u", fsopt->rsize);
	if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
		seq_printf(m, ",rasize=%u", fsopt->rasize);
	if (fsopt->congestion_kb != default_congestion_kb())
		seq_printf(m, ",write_congestion_kb=%u", fsopt->congestion_kb);
	if (fsopt->caps_max)
		seq_printf(m, ",caps_max=%d", fsopt->caps_max);
	if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_min=%u",
			 fsopt->caps_wanted_delay_min);
	if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_max=%u",
			   fsopt->caps_wanted_delay_max);
	if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
		seq_printf(m, ",readdir_max_entries=%u", fsopt->max_readdir);
	if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
		seq_printf(m, ",readdir_max_bytes=%u", fsopt->max_readdir_bytes);
	if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
		seq_show_option(m, "snapdirname", fsopt->snapdir_name);

	return 0;
}

610  * handle any mon messages the standard library doesn't understand.
611  * return error if we don't either.
612  */
613 static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
614 {
615 	struct ceph_fs_client *fsc = client->private;
616 	int type = le16_to_cpu(msg->hdr.type);
617 
618 	switch (type) {
619 	case CEPH_MSG_MDS_MAP:
620 		ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
621 		return 0;
622 	case CEPH_MSG_FS_MAP_USER:
623 		ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
624 		return 0;
625 	default:
626 		return -1;
627 	}
628 }
629 
630 /*
631  * create a new fs client
632  *
633  * Success or not, this function consumes @fsopt and @opt.
634  */
635 static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
636 					struct ceph_options *opt)
637 {
638 	struct ceph_fs_client *fsc;
639 	int err;
640 
641 	fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
642 	if (!fsc) {
643 		err = -ENOMEM;
644 		goto fail;
645 	}
646 
647 	fsc->client = ceph_create_client(opt, fsc);
648 	if (IS_ERR(fsc->client)) {
649 		err = PTR_ERR(fsc->client);
650 		goto fail;
651 	}
652 	opt = NULL; /* fsc->client now owns this */
653 
654 	fsc->client->extra_mon_dispatch = extra_mon_dispatch;
655 	ceph_set_opt(fsc->client, ABORT_ON_FULL);
656 
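	/*
	 * Without an explicit mds_namespace we can subscribe to the mdsmap
	 * right away; otherwise we first need the fsmap to resolve the
	 * namespace name to a filesystem (see extra_mon_dispatch()).
	 */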
	if (!fsopt->mds_namespace) {
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
				   0, true);
	} else {
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
				   0, false);
	}

	fsc->mount_options = fsopt;

	fsc->sb = NULL;
	fsc->mount_state = CEPH_MOUNT_MOUNTING;
	fsc->filp_gen = 1;
	fsc->have_copy_from2 = true;

	atomic_long_set(&fsc->writeback_count, 0);

	err = -ENOMEM;
	/*
	 * The number of concurrent works can be high, but they don't need
	 * to be processed in parallel; limit concurrency.
	 */
	fsc->inode_wq = alloc_workqueue("ceph-inode", WQ_UNBOUND, 0);
	if (!fsc->inode_wq)
		goto fail_client;
	fsc->cap_wq = alloc_workqueue("ceph-cap", 0, 1);
	if (!fsc->cap_wq)
		goto fail_inode_wq;

	spin_lock(&ceph_fsc_lock);
	list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list);
	spin_unlock(&ceph_fsc_lock);

	return fsc;

fail_inode_wq:
	destroy_workqueue(fsc->inode_wq);
fail_client:
	ceph_destroy_client(fsc->client);
fail:
	kfree(fsc);
	if (opt)
		ceph_destroy_options(opt);
	destroy_mount_options(fsopt);
	return ERR_PTR(err);
}

static void flush_fs_workqueues(struct ceph_fs_client *fsc)
{
	flush_workqueue(fsc->inode_wq);
	flush_workqueue(fsc->cap_wq);
}

static void destroy_fs_client(struct ceph_fs_client *fsc)
{
	dout("destroy_fs_client %p\n", fsc);

	spin_lock(&ceph_fsc_lock);
	list_del(&fsc->metric_wakeup);
	spin_unlock(&ceph_fsc_lock);

	ceph_mdsc_destroy(fsc);
	destroy_workqueue(fsc->inode_wq);
	destroy_workqueue(fsc->cap_wq);

	destroy_mount_options(fsc->mount_options);

	ceph_destroy_client(fsc->client);

	kfree(fsc);
	dout("destroy_fs_client %p done\n", fsc);
}

/*
 * caches
 */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_cap_flush_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;
struct kmem_cache *ceph_dir_file_cachep;
struct kmem_cache *ceph_mds_request_cachep;
mempool_t *ceph_wb_pagevec_pool;

static void ceph_inode_init_once(void *foo)
{
	struct ceph_inode_info *ci = foo;
	inode_init_once(&ci->vfs_inode);
}

static int __init init_caches(void)
{
	int error = -ENOMEM;

	ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
				      sizeof(struct ceph_inode_info),
				      __alignof__(struct ceph_inode_info),
				      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
				      SLAB_ACCOUNT, ceph_inode_init_once);
	if (!ceph_inode_cachep)
		return -ENOMEM;

	ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD);
	if (!ceph_cap_cachep)
		goto bad_cap;
	ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
					   SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_cap_flush_cachep)
		goto bad_cap_flush;

	ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
					SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_dentry_cachep)
		goto bad_dentry;

	ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
	if (!ceph_file_cachep)
		goto bad_file;

	ceph_dir_file_cachep = KMEM_CACHE(ceph_dir_file_info, SLAB_MEM_SPREAD);
	if (!ceph_dir_file_cachep)
		goto bad_dir_file;

	ceph_mds_request_cachep = KMEM_CACHE(ceph_mds_request, SLAB_MEM_SPREAD);
	if (!ceph_mds_request_cachep)
		goto bad_mds_req;

	ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT);
	if (!ceph_wb_pagevec_pool)
		goto bad_pagevec_pool;

	error = ceph_fscache_register();
	if (error)
		goto bad_fscache;

	return 0;

bad_fscache:
	mempool_destroy(ceph_wb_pagevec_pool);
bad_pagevec_pool:
	kmem_cache_destroy(ceph_mds_request_cachep);
bad_mds_req:
	kmem_cache_destroy(ceph_dir_file_cachep);
bad_dir_file:
	kmem_cache_destroy(ceph_file_cachep);
bad_file:
	kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
	kmem_cache_destroy(ceph_cap_flush_cachep);
bad_cap_flush:
	kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
	kmem_cache_destroy(ceph_inode_cachep);
	return error;
}

static void destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	kmem_cache_destroy(ceph_inode_cachep);
	kmem_cache_destroy(ceph_cap_cachep);
	kmem_cache_destroy(ceph_cap_flush_cachep);
	kmem_cache_destroy(ceph_dentry_cachep);
	kmem_cache_destroy(ceph_file_cachep);
	kmem_cache_destroy(ceph_dir_file_cachep);
	kmem_cache_destroy(ceph_mds_request_cachep);
	mempool_destroy(ceph_wb_pagevec_pool);

	ceph_fscache_unregister();
}

static void __ceph_umount_begin(struct ceph_fs_client *fsc)
{
	ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
	ceph_mdsc_force_umount(fsc->mdsc);
	fsc->filp_gen++; // invalidate open files
}

/*
 * ceph_umount_begin - initiate forced umount.  Tear down the
 * mount, skipping steps that may hang while waiting for server(s).
 */
static void ceph_umount_begin(struct super_block *sb)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

	dout("ceph_umount_begin - starting forced umount\n");
	if (!fsc)
		return;
	fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
	__ceph_umount_begin(fsc);
}

static const struct super_operations ceph_super_ops = {
	.alloc_inode	= ceph_alloc_inode,
	.free_inode	= ceph_free_inode,
	.write_inode    = ceph_write_inode,
	.drop_inode	= generic_delete_inode,
	.evict_inode	= ceph_evict_inode,
	.sync_fs        = ceph_sync_fs,
	.put_super	= ceph_put_super,
	.show_options   = ceph_show_options,
	.statfs		= ceph_statfs,
	.umount_begin   = ceph_umount_begin,
};

/*
 * Bootstrap mount by opening the root directory.  Note the mount
 * @started time from caller, and time out if this takes too long.
 */
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
				       const char *path,
				       unsigned long started)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req = NULL;
	int err;
	struct dentry *root;

	/* open dir */
	dout("open_root_inode opening '%s'\n", path);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_path1 = kstrdup(path, GFP_NOFS);
	if (!req->r_path1) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}

	req->r_ino1.ino = CEPH_INO_ROOT;
	req->r_ino1.snap = CEPH_NOSNAP;
	req->r_started = started;
	req->r_timeout = fsc->client->options->mount_timeout;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_num_caps = 2;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err == 0) {
		struct inode *inode = req->r_target_inode;
		req->r_target_inode = NULL;
		dout("open_root_inode success\n");
		root = d_make_root(inode);
		if (!root) {
			root = ERR_PTR(-ENOMEM);
			goto out;
		}
		dout("open_root_inode success, root dentry is %p\n", root);
	} else {
		root = ERR_PTR(err);
	}
out:
	ceph_mdsc_put_request(req);
	return root;
}

/*
 * mount: join the ceph cluster, and open root directory.
 */
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
				      struct fs_context *fc)
{
	int err;
	unsigned long started = jiffies;  /* note the start time */
	struct dentry *root;

	dout("mount start %p\n", fsc);
	mutex_lock(&fsc->client->mount_mutex);

	if (!fsc->sb->s_root) {
		const char *path = fsc->mount_options->server_path ?
				     fsc->mount_options->server_path + 1 : "";
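		/* MDS paths are relative to the root, so skip the leading '/' */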

		err = __ceph_open_session(fsc->client, started);
		if (err < 0)
			goto out;

		/* setup fscache */
		if (fsc->mount_options->flags & CEPH_MOUNT_OPT_FSCACHE) {
			err = ceph_fscache_register_fs(fsc, fc);
			if (err < 0)
				goto out;
		}

		dout("mount opening path '%s'\n", path);

		ceph_fs_debugfs_init(fsc);

		root = open_root_dentry(fsc, path, started);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}
		fsc->sb->s_root = dget(root);
	} else {
		root = dget(fsc->sb->s_root);
	}

	fsc->mount_state = CEPH_MOUNT_MOUNTED;
	dout("mount success\n");
	mutex_unlock(&fsc->client->mount_mutex);
	return root;

out:
	mutex_unlock(&fsc->client->mount_mutex);
	return ERR_PTR(err);
}

static int ceph_set_super(struct super_block *s, struct fs_context *fc)
{
	struct ceph_fs_client *fsc = s->s_fs_info;
	int ret;

	dout("set_super %p\n", s);

	s->s_maxbytes = MAX_LFS_FILESIZE;

	s->s_xattr = ceph_xattr_handlers;
	fsc->sb = s;
	fsc->max_file_size = 1ULL << 40; /* temp value until we get mdsmap */

	s->s_op = &ceph_super_ops;
	s->s_d_op = &ceph_dentry_ops;
	s->s_export_op = &ceph_export_ops;

	s->s_time_gran = 1;
	s->s_time_min = 0;
	s->s_time_max = U32_MAX;

	ret = set_anon_super_fc(s, fc);
	if (ret != 0)
		fsc->sb = NULL;
	return ret;
}

/*
 * share superblock if same fs AND options
 */
static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
{
	struct ceph_fs_client *new = fc->s_fs_info;
	struct ceph_mount_options *fsopt = new->mount_options;
	struct ceph_options *opt = new->client->options;
	struct ceph_fs_client *other = ceph_sb_to_client(sb);

	dout("ceph_compare_super %p\n", sb);

	if (compare_mount_options(fsopt, opt, other)) {
		dout("monitor(s)/mount options don't match\n");
		return 0;
	}
	if ((opt->flags & CEPH_OPT_FSID) &&
	    ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
		dout("fsid doesn't match\n");
		return 0;
	}
	if (fc->sb_flags != (sb->s_flags & ~SB_BORN)) {
		dout("flags differ\n");
		return 0;
	}
	return 1;
}

/*
 * construct our own bdi so we can control readahead, etc.
 */
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
{
	int err;

	err = super_setup_bdi_name(sb, "ceph-%ld",
				   atomic_long_inc_return(&bdi_seq));
	if (err)
		return err;

	/* set ra_pages based on rasize mount option? */
	sb->s_bdi->ra_pages = fsc->mount_options->rasize >> PAGE_SHIFT;

	/* set io_pages based on max osd read size */
	sb->s_bdi->io_pages = fsc->mount_options->rsize >> PAGE_SHIFT;

	return 0;
}

static int ceph_get_tree(struct fs_context *fc)
{
	struct ceph_parse_opts_ctx *pctx = fc->fs_private;
	struct super_block *sb;
	struct ceph_fs_client *fsc;
	struct dentry *res;
	int (*compare_super)(struct super_block *, struct fs_context *) =
		ceph_compare_super;
	int err;

	dout("ceph_get_tree\n");

	if (!fc->source)
		return invalfc(fc, "No source");

	/* create client (which we may/may not use) */
	fsc = create_fs_client(pctx->opts, pctx->copts);
	pctx->opts = NULL;
	pctx->copts = NULL;
	if (IS_ERR(fsc)) {
		err = PTR_ERR(fsc);
		goto out_final;
	}

	err = ceph_mdsc_init(fsc);
	if (err < 0)
		goto out;

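	/*
	 * The libceph "noshare" option disables the sget_fc() match, forcing
	 * a brand-new superblock instead of sharing an existing one.
	 */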
	if (ceph_test_opt(fsc->client, NOSHARE))
		compare_super = NULL;

	fc->s_fs_info = fsc;
	sb = sget_fc(fc, compare_super, ceph_set_super);
	fc->s_fs_info = NULL;
	if (IS_ERR(sb)) {
		err = PTR_ERR(sb);
		goto out;
	}

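	/*
	 * sget_fc() may have matched an existing superblock; if so, our
	 * freshly created client is redundant and is destroyed.  Otherwise
	 * finish setting up the new superblock's bdi.
	 */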
	if (ceph_sb_to_client(sb) != fsc) {
		destroy_fs_client(fsc);
		fsc = ceph_sb_to_client(sb);
		dout("get_sb got existing client %p\n", fsc);
	} else {
		dout("get_sb using new client %p\n", fsc);
		err = ceph_setup_bdi(sb, fsc);
		if (err < 0)
			goto out_splat;
	}

	res = ceph_real_mount(fsc, fc);
	if (IS_ERR(res)) {
		err = PTR_ERR(res);
		goto out_splat;
	}
	dout("root %p inode %p ino %llx.%llx\n", res,
	     d_inode(res), ceph_vinop(d_inode(res)));
	fc->root = fsc->sb->s_root;
	return 0;

out_splat:
	if (!ceph_mdsmap_is_cluster_available(fsc->mdsc->mdsmap)) {
		pr_info("No mds server is up or the cluster is laggy\n");
		err = -EHOSTUNREACH;
	}

	ceph_mdsc_close_sessions(fsc->mdsc);
	deactivate_locked_super(sb);
	goto out_final;

out:
	destroy_fs_client(fsc);
out_final:
	dout("ceph_get_tree fail %d\n", err);
	return err;
}

static void ceph_free_fc(struct fs_context *fc)
{
	struct ceph_parse_opts_ctx *pctx = fc->fs_private;

	if (pctx) {
		destroy_mount_options(pctx->opts);
		ceph_destroy_options(pctx->copts);
		kfree(pctx);
	}
}

static int ceph_reconfigure_fc(struct fs_context *fc)
{
	struct ceph_parse_opts_ctx *pctx = fc->fs_private;
	struct ceph_mount_options *fsopt = pctx->opts;
	struct ceph_fs_client *fsc = ceph_sb_to_client(fc->root->d_sb);

	if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
		ceph_set_mount_opt(fsc, ASYNC_DIROPS);
	else
		ceph_clear_mount_opt(fsc, ASYNC_DIROPS);

	sync_filesystem(fc->root->d_sb);
	return 0;
}

static const struct fs_context_operations ceph_context_ops = {
	.free		= ceph_free_fc,
	.parse_param	= ceph_parse_mount_param,
	.get_tree	= ceph_get_tree,
	.reconfigure	= ceph_reconfigure_fc,
};

/*
 * Set up the filesystem mount context.
 */
static int ceph_init_fs_context(struct fs_context *fc)
{
	struct ceph_parse_opts_ctx *pctx;
	struct ceph_mount_options *fsopt;

	pctx = kzalloc(sizeof(*pctx), GFP_KERNEL);
	if (!pctx)
		return -ENOMEM;

	pctx->copts = ceph_alloc_options();
	if (!pctx->copts)
		goto nomem;

	pctx->opts = kzalloc(sizeof(*pctx->opts), GFP_KERNEL);
	if (!pctx->opts)
		goto nomem;

	fsopt = pctx->opts;
	fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;

	fsopt->wsize = CEPH_MAX_WRITE_SIZE;
	fsopt->rsize = CEPH_MAX_READ_SIZE;
	fsopt->rasize = CEPH_RASIZE_DEFAULT;
	fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
	if (!fsopt->snapdir_name)
		goto nomem;

	fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
	fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
	fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
	fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
	fsopt->congestion_kb = default_congestion_kb();

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	fc->sb_flags |= SB_POSIXACL;
#endif

	fc->fs_private = pctx;
	fc->ops = &ceph_context_ops;
	return 0;

nomem:
	destroy_mount_options(pctx->opts);
	ceph_destroy_options(pctx->copts);
	kfree(pctx);
	return -ENOMEM;
}

static void ceph_kill_sb(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);

	dout("kill_sb %p\n", s);

	ceph_mdsc_pre_umount(fsc->mdsc);
	flush_fs_workqueues(fsc);

	kill_anon_super(s);

	fsc->client->extra_mon_dispatch = NULL;
	ceph_fs_debugfs_cleanup(fsc);

	ceph_fscache_unregister_fs(fsc);

	destroy_fs_client(fsc);
}

static struct file_system_type ceph_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ceph",
	.init_fs_context = ceph_init_fs_context,
	.kill_sb	= ceph_kill_sb,
	.fs_flags	= FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("ceph");

int ceph_force_reconnect(struct super_block *sb)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	fsc->mount_state = CEPH_MOUNT_RECOVER;
	__ceph_umount_begin(fsc);

	/* Make sure all page caches get invalidated.
	 * see remove_session_caps_cb() */
	flush_workqueue(fsc->inode_wq);

	/* In case we were blocklisted. This also resets
	 * all mon/osd connections */

	ceph_osdc_clear_abort_err(&fsc->client->osdc);

	fsc->blocklisted = false;
	fsc->mount_state = CEPH_MOUNT_MOUNTED;

	if (sb->s_root) {
		err = __ceph_do_getattr(d_inode(sb->s_root), NULL,
					CEPH_STAT_CAP_INODE, true);
	}
	return err;
}

static int __init init_ceph(void)
{
	int ret = init_caches();
	if (ret)
		goto out;

	ceph_flock_init();
	ret = register_filesystem(&ceph_fs_type);
	if (ret)
		goto out_caches;

	pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);

	return 0;

out_caches:
	destroy_caches();
out:
	return ret;
}

static void __exit exit_ceph(void)
{
	dout("exit_ceph\n");
	unregister_filesystem(&ceph_fs_type);
	destroy_caches();
}

static int param_set_metrics(const char *val, const struct kernel_param *kp)
{
	struct ceph_fs_client *fsc;
	int ret;

	ret = param_set_bool(val, kp);
	if (ret) {
		pr_err("Failed to parse sending metrics switch value '%s'\n",
		       val);
		return ret;
	} else if (!disable_send_metrics) {
		// wake up all the mds clients
		spin_lock(&ceph_fsc_lock);
		list_for_each_entry(fsc, &ceph_fsc_list, metric_wakeup) {
			metric_schedule_delayed(&fsc->mdsc->metric);
		}
		spin_unlock(&ceph_fsc_lock);
	}

	return 0;
}

static const struct kernel_param_ops param_ops_metrics = {
	.set = param_set_metrics,
	.get = param_get_bool,
};

bool disable_send_metrics = false;
module_param_cb(disable_send_metrics, &param_ops_metrics, &disable_send_metrics, 0644);
MODULE_PARM_DESC(disable_send_metrics, "Enable sending perf metrics to ceph cluster (default: on)");

module_init(init_ceph);
module_exit(exit_ceph);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");