xref: /linux/fs/ceph/mds_client.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/fs.h>
5 #include <linux/wait.h>
6 #include <linux/slab.h>
7 #include <linux/gfp.h>
8 #include <linux/sched.h>
9 #include <linux/debugfs.h>
10 #include <linux/seq_file.h>
11 #include <linux/ratelimit.h>
12 #include <linux/bits.h>
13 #include <linux/ktime.h>
14 #include <linux/bitmap.h>
15 #include <linux/mnt_idmapping.h>
16 
17 #include "super.h"
18 #include "mds_client.h"
19 #include "crypto.h"
20 
21 #include <linux/ceph/ceph_features.h>
22 #include <linux/ceph/messenger.h>
23 #include <linux/ceph/decode.h>
24 #include <linux/ceph/pagelist.h>
25 #include <linux/ceph/auth.h>
26 #include <linux/ceph/debugfs.h>
27 
28 #define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
29 
30 /*
31  * A cluster of MDS (metadata server) daemons is responsible for
32  * managing the file system namespace (the directory hierarchy and
33  * inodes) and for coordinating shared access to storage.  Metadata is
34  * partitioned hierarchically across a number of servers, and that
35  * partition varies over time as the cluster adjusts the distribution
36  * in order to balance load.
37  *
38  * The MDS client is primarily responsible for managing synchronous
39  * metadata requests for operations like open, unlink, and so forth.
40  * If there is an MDS failure, we find out about it when we (possibly
41  * request and) receive a new MDS map, and can resubmit affected
42  * requests.
43  *
44  * For the most part, though, we take advantage of a lossless
45  * communications channel to the MDS, and do not need to worry about
46  * timing out or resubmitting requests.
47  *
48  * We maintain a stateful "session" with each MDS we interact with.
49  * Within each session, we send periodic heartbeat messages to ensure
50  * any capabilities or leases we have been issued remain valid.  If
51  * the session times out and goes stale, our leases and capabilities
52  * are no longer valid.
53  */
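/*
 * Illustrative sketch (an editorial addition, not part of the original
 * file) of the staleness rule described above: capabilities and leases
 * are only trusted while the session's cap TTL, refreshed by the
 * periodic renew exchange, still lies in the future. Assumes s_cap_ttl
 * is a jiffies timestamp, as set up in register_session() below.
 */
static inline bool demo_session_caps_valid(struct ceph_mds_session *s)
{
	return time_before(jiffies, s->s_cap_ttl);
}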
54 
55 struct ceph_reconnect_state {
56 	struct ceph_mds_session *session;
57 	int nr_caps, nr_realms;
58 	struct ceph_pagelist *pagelist;
59 	unsigned msg_version;
60 	bool allow_multi;
61 };
62 
63 static void __wake_requests(struct ceph_mds_client *mdsc,
64 			    struct list_head *head);
65 static void ceph_cap_release_work(struct work_struct *work);
66 static void ceph_cap_reclaim_work(struct work_struct *work);
67 
68 static const struct ceph_connection_operations mds_con_ops;
69 
70 
71 /*
72  * mds reply parsing
73  */
74 
75 static int parse_reply_info_quota(void **p, void *end,
76 				  struct ceph_mds_reply_info_in *info)
77 {
78 	u8 struct_v, struct_compat;
79 	u32 struct_len;
80 
81 	ceph_decode_8_safe(p, end, struct_v, bad);
82 	ceph_decode_8_safe(p, end, struct_compat, bad);
83 	/* struct_v is expected to be >= 1. we only
84 	 * understand encoding with struct_compat == 1. */
85 	if (!struct_v || struct_compat != 1)
86 		goto bad;
87 	ceph_decode_32_safe(p, end, struct_len, bad);
88 	ceph_decode_need(p, end, struct_len, bad);
89 	end = *p + struct_len;
90 	ceph_decode_64_safe(p, end, info->max_bytes, bad);
91 	ceph_decode_64_safe(p, end, info->max_files, bad);
92 	*p = end;
93 	return 0;
94 bad:
95 	return -EIO;
96 }
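/*
 * Sketch of the versioned container framing consumed above and by the
 * other parse_reply_info_* helpers (layout inferred from the decode
 * calls; field names here are descriptive only):
 *
 *	u8	struct_v;	// encoding version, expected >= 1
 *	u8	struct_compat;	// oldest compatible version; we require 1
 *	u32	struct_len;	// byte length of the payload that follows
 *	u8	payload[struct_len];
 *
 * Bounding the decode by struct_len lets an older client skip trailing
 * fields introduced by a newer MDS: after decoding the fields it
 * understands, it simply sets *p = end.
 */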
97 
98 /*
99  * parse individual inode info
100  */
101 static int parse_reply_info_in(void **p, void *end,
102 			       struct ceph_mds_reply_info_in *info,
103 			       u64 features)
104 {
105 	int err = 0;
106 	u8 struct_v = 0;
107 
108 	if (features == (u64)-1) {
109 		u32 struct_len;
110 		u8 struct_compat;
111 		ceph_decode_8_safe(p, end, struct_v, bad);
112 		ceph_decode_8_safe(p, end, struct_compat, bad);
113 		/* struct_v is expected to be >= 1. we only understand
114 		 * encoding with struct_compat == 1. */
115 		if (!struct_v || struct_compat != 1)
116 			goto bad;
117 		ceph_decode_32_safe(p, end, struct_len, bad);
118 		ceph_decode_need(p, end, struct_len, bad);
119 		end = *p + struct_len;
120 	}
121 
122 	ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
123 	info->in = *p;
124 	*p += sizeof(struct ceph_mds_reply_inode) +
125 		sizeof(*info->in->fragtree.splits) *
126 		le32_to_cpu(info->in->fragtree.nsplits);
127 
128 	ceph_decode_32_safe(p, end, info->symlink_len, bad);
129 	ceph_decode_need(p, end, info->symlink_len, bad);
130 	info->symlink = *p;
131 	*p += info->symlink_len;
132 
133 	ceph_decode_copy_safe(p, end, &info->dir_layout,
134 			      sizeof(info->dir_layout), bad);
135 	ceph_decode_32_safe(p, end, info->xattr_len, bad);
136 	ceph_decode_need(p, end, info->xattr_len, bad);
137 	info->xattr_data = *p;
138 	*p += info->xattr_len;
139 
140 	if (features == (u64)-1) {
141 		/* inline data */
142 		ceph_decode_64_safe(p, end, info->inline_version, bad);
143 		ceph_decode_32_safe(p, end, info->inline_len, bad);
144 		ceph_decode_need(p, end, info->inline_len, bad);
145 		info->inline_data = *p;
146 		*p += info->inline_len;
147 		/* quota */
148 		err = parse_reply_info_quota(p, end, info);
149 		if (err < 0)
150 			goto out_bad;
151 		/* pool namespace */
152 		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
153 		if (info->pool_ns_len > 0) {
154 			ceph_decode_need(p, end, info->pool_ns_len, bad);
155 			info->pool_ns_data = *p;
156 			*p += info->pool_ns_len;
157 		}
158 
159 		/* btime */
160 		ceph_decode_need(p, end, sizeof(info->btime), bad);
161 		ceph_decode_copy(p, &info->btime, sizeof(info->btime));
162 
163 		/* change attribute */
164 		ceph_decode_64_safe(p, end, info->change_attr, bad);
165 
166 		/* dir pin */
167 		if (struct_v >= 2) {
168 			ceph_decode_32_safe(p, end, info->dir_pin, bad);
169 		} else {
170 			info->dir_pin = -ENODATA;
171 		}
172 
173 		/* snapshot birth time, remains zero for v<=2 */
174 		if (struct_v >= 3) {
175 			ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
176 			ceph_decode_copy(p, &info->snap_btime,
177 					 sizeof(info->snap_btime));
178 		} else {
179 			memset(&info->snap_btime, 0, sizeof(info->snap_btime));
180 		}
181 
182 		/* snapshot count, remains zero for v<=3 */
183 		if (struct_v >= 4) {
184 			ceph_decode_64_safe(p, end, info->rsnaps, bad);
185 		} else {
186 			info->rsnaps = 0;
187 		}
188 
189 		if (struct_v >= 5) {
190 			u32 alen;
191 
192 			ceph_decode_32_safe(p, end, alen, bad);
193 
194 			while (alen--) {
195 				u32 len;
196 
197 				/* key */
198 				ceph_decode_32_safe(p, end, len, bad);
199 				ceph_decode_skip_n(p, end, len, bad);
200 				/* value */
201 				ceph_decode_32_safe(p, end, len, bad);
202 				ceph_decode_skip_n(p, end, len, bad);
203 			}
204 		}
205 
206 		/* fscrypt flag -- ignore */
207 		if (struct_v >= 6)
208 			ceph_decode_skip_8(p, end, bad);
209 
210 		info->fscrypt_auth = NULL;
211 		info->fscrypt_auth_len = 0;
212 		info->fscrypt_file = NULL;
213 		info->fscrypt_file_len = 0;
214 		if (struct_v >= 7) {
215 			ceph_decode_32_safe(p, end, info->fscrypt_auth_len, bad);
216 			if (info->fscrypt_auth_len) {
217 				info->fscrypt_auth = kmalloc(info->fscrypt_auth_len,
218 							     GFP_KERNEL);
219 				if (!info->fscrypt_auth)
220 					return -ENOMEM;
221 				ceph_decode_copy_safe(p, end, info->fscrypt_auth,
222 						      info->fscrypt_auth_len, bad);
223 			}
224 			ceph_decode_32_safe(p, end, info->fscrypt_file_len, bad);
225 			if (info->fscrypt_file_len) {
226 				info->fscrypt_file = kmalloc(info->fscrypt_file_len,
227 							     GFP_KERNEL);
228 				if (!info->fscrypt_file)
229 					return -ENOMEM;
230 				ceph_decode_copy_safe(p, end, info->fscrypt_file,
231 						      info->fscrypt_file_len, bad);
232 			}
233 		}
234 		*p = end;
235 	} else {
236 		/* legacy (unversioned) struct */
237 		if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
238 			ceph_decode_64_safe(p, end, info->inline_version, bad);
239 			ceph_decode_32_safe(p, end, info->inline_len, bad);
240 			ceph_decode_need(p, end, info->inline_len, bad);
241 			info->inline_data = *p;
242 			*p += info->inline_len;
243 		} else
244 			info->inline_version = CEPH_INLINE_NONE;
245 
246 		if (features & CEPH_FEATURE_MDS_QUOTA) {
247 			err = parse_reply_info_quota(p, end, info);
248 			if (err < 0)
249 				goto out_bad;
250 		} else {
251 			info->max_bytes = 0;
252 			info->max_files = 0;
253 		}
254 
255 		info->pool_ns_len = 0;
256 		info->pool_ns_data = NULL;
257 		if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
258 			ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
259 			if (info->pool_ns_len > 0) {
260 				ceph_decode_need(p, end, info->pool_ns_len, bad);
261 				info->pool_ns_data = *p;
262 				*p += info->pool_ns_len;
263 			}
264 		}
265 
266 		if (features & CEPH_FEATURE_FS_BTIME) {
267 			ceph_decode_need(p, end, sizeof(info->btime), bad);
268 			ceph_decode_copy(p, &info->btime, sizeof(info->btime));
269 			ceph_decode_64_safe(p, end, info->change_attr, bad);
270 		}
271 
272 		info->dir_pin = -ENODATA;
273 		/* info->snap_btime and info->rsnaps remain zero */
274 	}
275 	return 0;
276 bad:
277 	err = -EIO;
278 out_bad:
279 	return err;
280 }
281 
282 static int parse_reply_info_dir(void **p, void *end,
283 				struct ceph_mds_reply_dirfrag **dirfrag,
284 				u64 features)
285 {
286 	if (features == (u64)-1) {
287 		u8 struct_v, struct_compat;
288 		u32 struct_len;
289 		ceph_decode_8_safe(p, end, struct_v, bad);
290 		ceph_decode_8_safe(p, end, struct_compat, bad);
291 		/* struct_v is expected to be >= 1. we only understand
292 	 * encoding with struct_compat == 1. */
293 		if (!struct_v || struct_compat != 1)
294 			goto bad;
295 		ceph_decode_32_safe(p, end, struct_len, bad);
296 		ceph_decode_need(p, end, struct_len, bad);
297 		end = *p + struct_len;
298 	}
299 
300 	ceph_decode_need(p, end, sizeof(**dirfrag), bad);
301 	*dirfrag = *p;
302 	*p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
303 	if (unlikely(*p > end))
304 		goto bad;
305 	if (features == (u64)-1)
306 		*p = end;
307 	return 0;
308 bad:
309 	return -EIO;
310 }
311 
312 static int parse_reply_info_lease(void **p, void *end,
313 				  struct ceph_mds_reply_lease **lease,
314 				  u64 features, u32 *altname_len, u8 **altname)
315 {
316 	u8 struct_v;
317 	u32 struct_len;
318 	void *lend;
319 
320 	if (features == (u64)-1) {
321 		u8 struct_compat;
322 
323 		ceph_decode_8_safe(p, end, struct_v, bad);
324 		ceph_decode_8_safe(p, end, struct_compat, bad);
325 
326 		/* struct_v is expected to be >= 1. we only understand
327 	 * encoding with struct_compat == 1. */
328 		if (!struct_v || struct_compat != 1)
329 			goto bad;
330 
331 		ceph_decode_32_safe(p, end, struct_len, bad);
332 	} else {
333 		struct_len = sizeof(**lease);
334 		*altname_len = 0;
335 		*altname = NULL;
336 	}
337 
338 	lend = *p + struct_len;
339 	ceph_decode_need(p, end, struct_len, bad);
340 	*lease = *p;
341 	*p += sizeof(**lease);
342 
343 	if (features == (u64)-1) {
344 		if (struct_v >= 2) {
345 			ceph_decode_32_safe(p, end, *altname_len, bad);
346 			ceph_decode_need(p, end, *altname_len, bad);
347 			*altname = *p;
348 			*p += *altname_len;
349 		} else {
350 			*altname = NULL;
351 			*altname_len = 0;
352 		}
353 	}
354 	*p = lend;
355 	return 0;
356 bad:
357 	return -EIO;
358 }
359 
360 /*
361  * parse a normal reply, which may contain a (dir+)dentry and/or a
362  * target inode.
363  */
364 static int parse_reply_info_trace(void **p, void *end,
365 				  struct ceph_mds_reply_info_parsed *info,
366 				  u64 features)
367 {
368 	int err;
369 
370 	if (info->head->is_dentry) {
371 		err = parse_reply_info_in(p, end, &info->diri, features);
372 		if (err < 0)
373 			goto out_bad;
374 
375 		err = parse_reply_info_dir(p, end, &info->dirfrag, features);
376 		if (err < 0)
377 			goto out_bad;
378 
379 		ceph_decode_32_safe(p, end, info->dname_len, bad);
380 		ceph_decode_need(p, end, info->dname_len, bad);
381 		info->dname = *p;
382 		*p += info->dname_len;
383 
384 		err = parse_reply_info_lease(p, end, &info->dlease, features,
385 					     &info->altname_len, &info->altname);
386 		if (err < 0)
387 			goto out_bad;
388 	}
389 
390 	if (info->head->is_target) {
391 		err = parse_reply_info_in(p, end, &info->targeti, features);
392 		if (err < 0)
393 			goto out_bad;
394 	}
395 
396 	if (unlikely(*p != end))
397 		goto bad;
398 	return 0;
399 
400 bad:
401 	err = -EIO;
402 out_bad:
403 	pr_err("problem parsing mds trace %d\n", err);
404 	return err;
405 }
406 
407 /*
408  * parse readdir results
409  */
410 static int parse_reply_info_readdir(void **p, void *end,
411 				    struct ceph_mds_request *req,
412 				    u64 features)
413 {
414 	struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
415 	struct ceph_client *cl = req->r_mdsc->fsc->client;
416 	u32 num, i = 0;
417 	int err;
418 
419 	err = parse_reply_info_dir(p, end, &info->dir_dir, features);
420 	if (err < 0)
421 		goto out_bad;
422 
423 	ceph_decode_need(p, end, sizeof(num) + 2, bad);
424 	num = ceph_decode_32(p);
425 	{
426 		u16 flags = ceph_decode_16(p);
427 		info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
428 		info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
429 		info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
430 		info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
431 	}
432 	if (num == 0)
433 		goto done;
434 
435 	BUG_ON(!info->dir_entries);
436 	if ((unsigned long)(info->dir_entries + num) >
437 	    (unsigned long)info->dir_entries + info->dir_buf_size) {
438 		pr_err_client(cl, "dir contents are larger than expected\n");
439 		WARN_ON(1);
440 		goto bad;
441 	}
442 
443 	info->dir_nr = num;
444 	while (num) {
445 		struct inode *inode = d_inode(req->r_dentry);
446 		struct ceph_inode_info *ci = ceph_inode(inode);
447 		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
448 		struct fscrypt_str tname = FSTR_INIT(NULL, 0);
449 		struct fscrypt_str oname = FSTR_INIT(NULL, 0);
450 		struct ceph_fname fname;
451 		u32 altname_len, _name_len;
452 		u8 *altname, *_name;
453 
454 		/* dentry */
455 		ceph_decode_32_safe(p, end, _name_len, bad);
456 		ceph_decode_need(p, end, _name_len, bad);
457 		_name = *p;
458 		*p += _name_len;
459 		doutc(cl, "parsed dir dname '%.*s'\n", _name_len, _name);
460 
461 		if (info->hash_order)
462 			rde->raw_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
463 						      _name, _name_len);
464 
465 		/* dentry lease */
466 		err = parse_reply_info_lease(p, end, &rde->lease, features,
467 					     &altname_len, &altname);
468 		if (err)
469 			goto out_bad;
470 
471 		/*
472 		 * Try to decrypt the dentry names and update them
473 		 * in the ceph_mds_reply_dir_entry struct.
474 		 */
475 		fname.dir = inode;
476 		fname.name = _name;
477 		fname.name_len = _name_len;
478 		fname.ctext = altname;
479 		fname.ctext_len = altname_len;
480 		/*
481 		 * _name_len may be larger than altname_len, for example
482 		 * when the human-readable name length is in the range
483 		 * (CEPH_NOHASH_NAME_MAX, CEPH_NOHASH_NAME_MAX + SHA256_DIGEST_SIZE);
484 		 * the copy in ceph_fname_to_usr would then corrupt the
485 		 * data if there is no encryption key.
486 		 *
487 		 * Just set the no_copy flag; then, if there is no
488 		 * encryption key, oname.name is always assigned
489 		 * directly from _name.
490 		 */
491 		fname.no_copy = true;
492 		if (altname_len == 0) {
493 			/*
494 			 * Set tname to _name, and this will be used
495 			 * to do the base64_decode in-place. It's
496 			 * safe because the decoded string is always
497 			 * shorter: roughly 3/4 of the original
498 			 * length.
499 			 */
500 			tname.name = _name;
501 
502 			/*
503 			 * Set oname to _name too, and this will be
504 			 * used to do the decryption in-place.
505 			 */
506 			oname.name = _name;
507 			oname.len = _name_len;
508 		} else {
509 			/*
510 			 * Decrypt in-place, directly from the altname
511 			 * ciphertext.
512 			 */
513 			oname.name = altname;
514 			oname.len = altname_len;
515 		}
516 		rde->is_nokey = false;
517 		err = ceph_fname_to_usr(&fname, &tname, &oname, &rde->is_nokey);
518 		if (err) {
519 			pr_err_client(cl, "unable to decode %.*s, got %d\n",
520 				      _name_len, _name, err);
521 			goto out_bad;
522 		}
523 		rde->name = oname.name;
524 		rde->name_len = oname.len;
525 
526 		/* inode */
527 		err = parse_reply_info_in(p, end, &rde->inode, features);
528 		if (err < 0)
529 			goto out_bad;
530 		/* ceph_readdir_prepopulate() will update it */
531 		rde->offset = 0;
532 		i++;
533 		num--;
534 	}
535 
536 done:
537 	/* Skip over any unrecognized fields */
538 	*p = end;
539 	return 0;
540 
541 bad:
542 	err = -EIO;
543 out_bad:
544 	pr_err_client(cl, "problem parsing dir contents %d\n", err);
545 	return err;
546 }
547 
548 /*
549  * parse fcntl F_GETLK results
550  */
551 static int parse_reply_info_filelock(void **p, void *end,
552 				     struct ceph_mds_reply_info_parsed *info,
553 				     u64 features)
554 {
555 	if (*p + sizeof(*info->filelock_reply) > end)
556 		goto bad;
557 
558 	info->filelock_reply = *p;
559 
560 	/* Skip over any unrecognized fields */
561 	*p = end;
562 	return 0;
563 bad:
564 	return -EIO;
565 }
566 
567 
568 #if BITS_PER_LONG == 64
569 
570 #define DELEGATED_INO_AVAILABLE		xa_mk_value(1)
571 
572 static int ceph_parse_deleg_inos(void **p, void *end,
573 				 struct ceph_mds_session *s)
574 {
575 	struct ceph_client *cl = s->s_mdsc->fsc->client;
576 	u32 sets;
577 
578 	ceph_decode_32_safe(p, end, sets, bad);
579 	doutc(cl, "got %u sets of delegated inodes\n", sets);
580 	while (sets--) {
581 		u64 start, len;
582 
583 		ceph_decode_64_safe(p, end, start, bad);
584 		ceph_decode_64_safe(p, end, len, bad);
585 
586 		/* Don't accept a delegation of system inodes */
587 		if (start < CEPH_INO_SYSTEM_BASE) {
588 			pr_warn_ratelimited_client(cl,
589 				"ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
590 				start, len);
591 			continue;
592 		}
593 		while (len--) {
594 			int err = xa_insert(&s->s_delegated_inos, start++,
595 					    DELEGATED_INO_AVAILABLE,
596 					    GFP_KERNEL);
597 			if (!err) {
598 				doutc(cl, "added delegated inode 0x%llx\n", start - 1);
599 			} else if (err == -EBUSY) {
600 				pr_warn_client(cl,
601 					"MDS delegated inode 0x%llx more than once.\n",
602 					start - 1);
603 			} else {
604 				return err;
605 			}
606 		}
607 	}
608 	return 0;
609 bad:
610 	return -EIO;
611 }
612 
613 u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
614 {
615 	unsigned long ino;
616 	void *val;
617 
618 	xa_for_each(&s->s_delegated_inos, ino, val) {
619 		val = xa_erase(&s->s_delegated_inos, ino);
620 		if (val == DELEGATED_INO_AVAILABLE)
621 			return ino;
622 	}
623 	return 0;
624 }
625 
626 int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
627 {
628 	return xa_insert(&s->s_delegated_inos, ino, DELEGATED_INO_AVAILABLE,
629 			 GFP_KERNEL);
630 }
631 #else /* BITS_PER_LONG == 64 */
632 /*
633  * FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
634  * ignore delegated_inos on 32 bit arch. Maybe eventually add xarrays for top
635  * and bottom words?
636  */
637 static int ceph_parse_deleg_inos(void **p, void *end,
638 				 struct ceph_mds_session *s)
639 {
640 	u32 sets;
641 
642 	ceph_decode_32_safe(p, end, sets, bad);
643 	if (sets)
644 		ceph_decode_skip_n(p, end, sets * 2 * sizeof(__le64), bad);
645 	return 0;
646 bad:
647 	return -EIO;
648 }
649 
650 u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
651 {
652 	return 0;
653 }
654 
655 int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
656 {
657 	return 0;
658 }
659 #endif /* BITS_PER_LONG == 64 */
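/*
 * Hypothetical usage sketch (editorial addition, not from the original
 * file): an async create consumes a pre-delegated inode number from
 * the session and hands it back if the request cannot be submitted
 * after all. The failure condition here is a placeholder.
 */
static int __maybe_unused demo_async_create_ino(struct ceph_mds_session *s)
{
	u64 ino = ceph_get_deleg_ino(s);

	if (!ino)
		return -ENOENT;	/* nothing delegated; fall back to sync create */

	/* ... build and send the async create request using 'ino' ... */

	if (0 /* hypothetical submission failure */)
		return ceph_restore_deleg_ino(s, ino);
	return 0;
}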
660 
661 /*
662  * parse create results
663  */
664 static int parse_reply_info_create(void **p, void *end,
665 				  struct ceph_mds_reply_info_parsed *info,
666 				  u64 features, struct ceph_mds_session *s)
667 {
668 	int ret;
669 
670 	if (features == (u64)-1 ||
671 	    (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
672 		if (*p == end) {
673 			/* Malformed reply? */
674 			info->has_create_ino = false;
675 		} else if (test_bit(CEPHFS_FEATURE_DELEG_INO, &s->s_features)) {
676 			info->has_create_ino = true;
677 			/* struct_v, struct_compat, and len */
678 			ceph_decode_skip_n(p, end, 2 + sizeof(u32), bad);
679 			ceph_decode_64_safe(p, end, info->ino, bad);
680 			ret = ceph_parse_deleg_inos(p, end, s);
681 			if (ret)
682 				return ret;
683 		} else {
684 			/* legacy */
685 			ceph_decode_64_safe(p, end, info->ino, bad);
686 			info->has_create_ino = true;
687 		}
688 	} else {
689 		if (*p != end)
690 			goto bad;
691 	}
692 
693 	/* Skip over any unrecognized fields */
694 	*p = end;
695 	return 0;
696 bad:
697 	return -EIO;
698 }
699 
700 static int parse_reply_info_getvxattr(void **p, void *end,
701 				      struct ceph_mds_reply_info_parsed *info,
702 				      u64 features)
703 {
704 	u32 value_len;
705 
706 	ceph_decode_skip_8(p, end, bad); /* skip current version: 1 */
707 	ceph_decode_skip_8(p, end, bad); /* skip first version: 1 */
708 	ceph_decode_skip_32(p, end, bad); /* skip payload length */
709 
710 	ceph_decode_32_safe(p, end, value_len, bad);
711 
712 	if (value_len == end - *p) {
713 		info->xattr_info.xattr_value = *p;
714 		info->xattr_info.xattr_value_len = value_len;
715 		*p = end;
716 		return value_len;
717 	}
718 bad:
719 	return -EIO;
720 }
721 
722 /*
723  * parse extra results
724  */
725 static int parse_reply_info_extra(void **p, void *end,
726 				  struct ceph_mds_request *req,
727 				  u64 features, struct ceph_mds_session *s)
728 {
729 	struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
730 	u32 op = le32_to_cpu(info->head->op);
731 
732 	if (op == CEPH_MDS_OP_GETFILELOCK)
733 		return parse_reply_info_filelock(p, end, info, features);
734 	else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
735 		return parse_reply_info_readdir(p, end, req, features);
736 	else if (op == CEPH_MDS_OP_CREATE)
737 		return parse_reply_info_create(p, end, info, features, s);
738 	else if (op == CEPH_MDS_OP_GETVXATTR)
739 		return parse_reply_info_getvxattr(p, end, info, features);
740 	else
741 		return -EIO;
742 }
743 
744 /*
745  * parse entire mds reply
746  */
747 static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
748 			    struct ceph_mds_request *req, u64 features)
749 {
750 	struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
751 	struct ceph_client *cl = s->s_mdsc->fsc->client;
752 	void *p, *end;
753 	u32 len;
754 	int err;
755 
756 	info->head = msg->front.iov_base;
757 	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
758 	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
759 
760 	/* trace */
761 	ceph_decode_32_safe(&p, end, len, bad);
762 	if (len > 0) {
763 		ceph_decode_need(&p, end, len, bad);
764 		err = parse_reply_info_trace(&p, p+len, info, features);
765 		if (err < 0)
766 			goto out_bad;
767 	}
768 
769 	/* extra */
770 	ceph_decode_32_safe(&p, end, len, bad);
771 	if (len > 0) {
772 		ceph_decode_need(&p, end, len, bad);
773 		err = parse_reply_info_extra(&p, p+len, req, features, s);
774 		if (err < 0)
775 			goto out_bad;
776 	}
777 
778 	/* snap blob */
779 	ceph_decode_32_safe(&p, end, len, bad);
780 	info->snapblob_len = len;
781 	info->snapblob = p;
782 	p += len;
783 
784 	if (p != end)
785 		goto bad;
786 	return 0;
787 
788 bad:
789 	err = -EIO;
790 out_bad:
791 	pr_err_client(cl, "mds parse_reply err %d\n", err);
792 	ceph_msg_dump(msg);
793 	return err;
794 }
795 
796 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
797 {
798 	int i;
799 
800 	kfree(info->diri.fscrypt_auth);
801 	kfree(info->diri.fscrypt_file);
802 	kfree(info->targeti.fscrypt_auth);
803 	kfree(info->targeti.fscrypt_file);
804 	if (!info->dir_entries)
805 		return;
806 
807 	for (i = 0; i < info->dir_nr; i++) {
808 		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
809 
810 		kfree(rde->inode.fscrypt_auth);
811 		kfree(rde->inode.fscrypt_file);
812 	}
813 	free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
814 }
815 
816 /*
817  * In the async unlink case the kclient doesn't wait for the first
818  * reply from the MDS; it just drops all the links, unhashes the
819  * dentry and then succeeds immediately.
820  *
821  * Any subsequent create/link/rename/etc. request that reuses the
822  * same file name must wait for the first reply of the in-flight
823  * unlink request, or the MDS may fail these following requests
824  * with -EEXIST if the async unlink request was delayed for some
825  * reason.
826  *
827  * The worst case is a non-async openc request: it will successfully
828  * open the file if the CDentry hasn't been unlinked yet, but the
829  * delayed async unlink request will later remove the CDentry. That
830  * means the just-created file may be deleted later by accident.
831  *
832  * Therefore we must wait for in-flight async unlink requests to
833  * finish before creating new files/directories that reuse the
834  * same file names.
835  */
836 int ceph_wait_on_conflict_unlink(struct dentry *dentry)
837 {
838 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
839 	struct ceph_client *cl = fsc->client;
840 	struct dentry *pdentry = dentry->d_parent;
841 	struct dentry *udentry, *found = NULL;
842 	struct ceph_dentry_info *di;
843 	struct qstr dname;
844 	u32 hash = dentry->d_name.hash;
845 	int err;
846 
847 	dname.name = dentry->d_name.name;
848 	dname.len = dentry->d_name.len;
849 
850 	rcu_read_lock();
851 	hash_for_each_possible_rcu(fsc->async_unlink_conflict, di,
852 				   hnode, hash) {
853 		udentry = di->dentry;
854 
855 		spin_lock(&udentry->d_lock);
856 		if (udentry->d_name.hash != hash)
857 			goto next;
858 		if (unlikely(udentry->d_parent != pdentry))
859 			goto next;
860 		if (!hash_hashed(&di->hnode))
861 			goto next;
862 
863 		if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
864 			pr_warn_client(cl, "dentry %p:%pd async unlink bit is not set\n",
865 				       dentry, dentry);
866 
867 		if (!d_same_name(udentry, pdentry, &dname))
868 			goto next;
869 
870 		found = dget_dlock(udentry);
871 		spin_unlock(&udentry->d_lock);
872 		break;
873 next:
874 		spin_unlock(&udentry->d_lock);
875 	}
876 	rcu_read_unlock();
877 
878 	if (likely(!found))
879 		return 0;
880 
881 	doutc(cl, "dentry %p:%pd conflict with old %p:%pd\n", dentry, dentry,
882 	      found, found);
883 
884 	err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT,
885 			  TASK_KILLABLE);
886 	dput(found);
887 	return err;
888 }
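/*
 * Hypothetical caller sketch (editorial addition): per the comment
 * above, any operation that re-creates a name (create, link, rename
 * target, ...) should wait out a conflicting in-flight async unlink
 * before submitting its own request.
 */
static int __maybe_unused demo_create_name(struct dentry *dentry)
{
	int err = ceph_wait_on_conflict_unlink(dentry);

	if (err)	/* killed while waiting */
		return err;

	/* ... now safe to build and submit the create/link/rename ... */
	return 0;
}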
889 
890 
891 /*
892  * sessions
893  */
894 const char *ceph_session_state_name(int s)
895 {
896 	switch (s) {
897 	case CEPH_MDS_SESSION_NEW: return "new";
898 	case CEPH_MDS_SESSION_OPENING: return "opening";
899 	case CEPH_MDS_SESSION_OPEN: return "open";
900 	case CEPH_MDS_SESSION_HUNG: return "hung";
901 	case CEPH_MDS_SESSION_CLOSING: return "closing";
902 	case CEPH_MDS_SESSION_CLOSED: return "closed";
903 	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
904 	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
905 	case CEPH_MDS_SESSION_REJECTED: return "rejected";
906 	default: return "???";
907 	}
908 }
909 
910 struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
911 {
912 	if (refcount_inc_not_zero(&s->s_ref))
913 		return s;
914 	return NULL;
915 }
916 
917 void ceph_put_mds_session(struct ceph_mds_session *s)
918 {
919 	if (IS_ERR_OR_NULL(s))
920 		return;
921 
922 	if (refcount_dec_and_test(&s->s_ref)) {
923 		if (s->s_auth.authorizer)
924 			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
925 		WARN_ON(mutex_is_locked(&s->s_mutex));
926 		xa_destroy(&s->s_delegated_inos);
927 		kfree(s);
928 	}
929 }
930 
931 /*
932  * called under mdsc->mutex
933  */
934 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
935 						   int mds)
936 {
937 	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
938 		return NULL;
939 	return ceph_get_mds_session(mdsc->sessions[mds]);
940 }
941 
942 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
943 {
944 	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
945 		return false;
946 	else
947 		return true;
948 }
949 
950 static int __verify_registered_session(struct ceph_mds_client *mdsc,
951 				       struct ceph_mds_session *s)
952 {
953 	if (s->s_mds >= mdsc->max_sessions ||
954 	    mdsc->sessions[s->s_mds] != s)
955 		return -ENOENT;
956 	return 0;
957 }
958 
959 /*
960  * create+register a new session for given mds.
961  * called under mdsc->mutex.
962  */
963 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
964 						 int mds)
965 {
966 	struct ceph_client *cl = mdsc->fsc->client;
967 	struct ceph_mds_session *s;
968 
969 	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
970 		return ERR_PTR(-EIO);
971 
972 	if (mds >= mdsc->mdsmap->possible_max_rank)
973 		return ERR_PTR(-EINVAL);
974 
975 	s = kzalloc(sizeof(*s), GFP_NOFS);
976 	if (!s)
977 		return ERR_PTR(-ENOMEM);
978 
979 	if (mds >= mdsc->max_sessions) {
980 		int newmax = 1 << get_count_order(mds + 1);
981 		struct ceph_mds_session **sa;
982 
983 		doutc(cl, "realloc to %d\n", newmax);
984 		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
985 		if (!sa)
986 			goto fail_realloc;
987 		if (mdsc->sessions) {
988 			memcpy(sa, mdsc->sessions,
989 			       mdsc->max_sessions * sizeof(void *));
990 			kfree(mdsc->sessions);
991 		}
992 		mdsc->sessions = sa;
993 		mdsc->max_sessions = newmax;
994 	}
995 
996 	doutc(cl, "mds%d\n", mds);
997 	s->s_mdsc = mdsc;
998 	s->s_mds = mds;
999 	s->s_state = CEPH_MDS_SESSION_NEW;
1000 	mutex_init(&s->s_mutex);
1001 
1002 	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
1003 
1004 	atomic_set(&s->s_cap_gen, 1);
1005 	s->s_cap_ttl = jiffies - 1;
1006 
1007 	spin_lock_init(&s->s_cap_lock);
1008 	INIT_LIST_HEAD(&s->s_caps);
1009 	refcount_set(&s->s_ref, 1);
1010 	INIT_LIST_HEAD(&s->s_waiting);
1011 	INIT_LIST_HEAD(&s->s_unsafe);
1012 	xa_init(&s->s_delegated_inos);
1013 	INIT_LIST_HEAD(&s->s_cap_releases);
1014 	INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);
1015 
1016 	INIT_LIST_HEAD(&s->s_cap_dirty);
1017 	INIT_LIST_HEAD(&s->s_cap_flushing);
1018 
1019 	mdsc->sessions[mds] = s;
1020 	atomic_inc(&mdsc->num_sessions);
1021 	refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
1022 
1023 	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
1024 		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
1025 
1026 	return s;
1027 
1028 fail_realloc:
1029 	kfree(s);
1030 	return ERR_PTR(-ENOMEM);
1031 }
1032 
1033 /*
1034  * called under mdsc->mutex
1035  */
1036 static void __unregister_session(struct ceph_mds_client *mdsc,
1037 			       struct ceph_mds_session *s)
1038 {
1039 	doutc(mdsc->fsc->client, "mds%d %p\n", s->s_mds, s);
1040 	BUG_ON(mdsc->sessions[s->s_mds] != s);
1041 	mdsc->sessions[s->s_mds] = NULL;
1042 	ceph_con_close(&s->s_con);
1043 	ceph_put_mds_session(s);
1044 	atomic_dec(&mdsc->num_sessions);
1045 }
1046 
1047 /*
1048  * drop session refs in request.
1049  *
1050  * should be last request ref, or hold mdsc->mutex
1051  */
1052 static void put_request_session(struct ceph_mds_request *req)
1053 {
1054 	if (req->r_session) {
1055 		ceph_put_mds_session(req->r_session);
1056 		req->r_session = NULL;
1057 	}
1058 }
1059 
1060 void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
1061 				void (*cb)(struct ceph_mds_session *),
1062 				bool check_state)
1063 {
1064 	int mds;
1065 
1066 	mutex_lock(&mdsc->mutex);
1067 	for (mds = 0; mds < mdsc->max_sessions; ++mds) {
1068 		struct ceph_mds_session *s;
1069 
1070 		s = __ceph_lookup_mds_session(mdsc, mds);
1071 		if (!s)
1072 			continue;
1073 
1074 		if (check_state && !check_session_state(s)) {
1075 			ceph_put_mds_session(s);
1076 			continue;
1077 		}
1078 
1079 		mutex_unlock(&mdsc->mutex);
1080 		cb(s);
1081 		ceph_put_mds_session(s);
1082 		mutex_lock(&mdsc->mutex);
1083 	}
1084 	mutex_unlock(&mdsc->mutex);
1085 }
1086 
1087 void ceph_mdsc_release_request(struct kref *kref)
1088 {
1089 	struct ceph_mds_request *req = container_of(kref,
1090 						    struct ceph_mds_request,
1091 						    r_kref);
1092 	ceph_mdsc_release_dir_caps_async(req);
1093 	destroy_reply_info(&req->r_reply_info);
1094 	if (req->r_request)
1095 		ceph_msg_put(req->r_request);
1096 	if (req->r_reply)
1097 		ceph_msg_put(req->r_reply);
1098 	if (req->r_inode) {
1099 		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
1100 		iput(req->r_inode);
1101 	}
1102 	if (req->r_parent) {
1103 		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
1104 		iput(req->r_parent);
1105 	}
1106 	iput(req->r_target_inode);
1107 	iput(req->r_new_inode);
1108 	if (req->r_dentry)
1109 		dput(req->r_dentry);
1110 	if (req->r_old_dentry)
1111 		dput(req->r_old_dentry);
1112 	if (req->r_old_dentry_dir) {
1113 		/*
1114 		 * track (and drop pins for) r_old_dentry_dir
1115 		 * separately, since r_old_dentry's d_parent may have
1116 		 * changed between the dir mutex being dropped and
1117 		 * this request being freed.
1118 		 */
1119 		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
1120 				  CEPH_CAP_PIN);
1121 		iput(req->r_old_dentry_dir);
1122 	}
1123 	kfree(req->r_path1);
1124 	kfree(req->r_path2);
1125 	put_cred(req->r_cred);
1126 	if (req->r_mnt_idmap)
1127 		mnt_idmap_put(req->r_mnt_idmap);
1128 	if (req->r_pagelist)
1129 		ceph_pagelist_release(req->r_pagelist);
1130 	kfree(req->r_fscrypt_auth);
1131 	kfree(req->r_altname);
1132 	put_request_session(req);
1133 	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
1134 	WARN_ON_ONCE(!list_empty(&req->r_wait));
1135 	kmem_cache_free(ceph_mds_request_cachep, req);
1136 }
1137 
1138 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
1139 
1140 /*
1141  * lookup request, bump ref if found.
1142  *
1143  * called under mdsc->mutex.
1144  */
1145 static struct ceph_mds_request *
1146 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
1147 {
1148 	struct ceph_mds_request *req;
1149 
1150 	req = lookup_request(&mdsc->request_tree, tid);
1151 	if (req)
1152 		ceph_mdsc_get_request(req);
1153 
1154 	return req;
1155 }
1156 
1157 /*
1158  * Register an in-flight request, and assign a tid.  Link to the
1159  * directory we are modifying (if any).
1160  *
1161  * Called under mdsc->mutex.
1162  */
1163 static void __register_request(struct ceph_mds_client *mdsc,
1164 			       struct ceph_mds_request *req,
1165 			       struct inode *dir)
1166 {
1167 	struct ceph_client *cl = mdsc->fsc->client;
1168 	int ret = 0;
1169 
1170 	req->r_tid = ++mdsc->last_tid;
1171 	if (req->r_num_caps) {
1172 		ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
1173 					req->r_num_caps);
1174 		if (ret < 0) {
1175 			pr_err_client(cl, "%p failed to reserve caps: %d\n",
1176 				      req, ret);
1177 			/* set req->r_err to fail early from __do_request */
1178 			req->r_err = ret;
1179 			return;
1180 		}
1181 	}
1182 	doutc(cl, "%p tid %lld\n", req, req->r_tid);
1183 	ceph_mdsc_get_request(req);
1184 	insert_request(&mdsc->request_tree, req);
1185 
1186 	req->r_cred = get_current_cred();
1187 	if (!req->r_mnt_idmap)
1188 		req->r_mnt_idmap = &nop_mnt_idmap;
1189 
1190 	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
1191 		mdsc->oldest_tid = req->r_tid;
1192 
1193 	if (dir) {
1194 		struct ceph_inode_info *ci = ceph_inode(dir);
1195 
1196 		ihold(dir);
1197 		req->r_unsafe_dir = dir;
1198 		spin_lock(&ci->i_unsafe_lock);
1199 		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
1200 		spin_unlock(&ci->i_unsafe_lock);
1201 	}
1202 }
1203 
1204 static void __unregister_request(struct ceph_mds_client *mdsc,
1205 				 struct ceph_mds_request *req)
1206 {
1207 	doutc(mdsc->fsc->client, "%p tid %lld\n", req, req->r_tid);
1208 
1209 	/* Never leave an unregistered request on an unsafe list! */
1210 	list_del_init(&req->r_unsafe_item);
1211 
1212 	if (req->r_tid == mdsc->oldest_tid) {
1213 		struct rb_node *p = rb_next(&req->r_node);
1214 		mdsc->oldest_tid = 0;
1215 		while (p) {
1216 			struct ceph_mds_request *next_req =
1217 				rb_entry(p, struct ceph_mds_request, r_node);
1218 			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
1219 				mdsc->oldest_tid = next_req->r_tid;
1220 				break;
1221 			}
1222 			p = rb_next(p);
1223 		}
1224 	}
1225 
1226 	erase_request(&mdsc->request_tree, req);
1227 
1228 	if (req->r_unsafe_dir) {
1229 		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
1230 		spin_lock(&ci->i_unsafe_lock);
1231 		list_del_init(&req->r_unsafe_dir_item);
1232 		spin_unlock(&ci->i_unsafe_lock);
1233 	}
1234 	if (req->r_target_inode &&
1235 	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
1236 		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
1237 		spin_lock(&ci->i_unsafe_lock);
1238 		list_del_init(&req->r_unsafe_target_item);
1239 		spin_unlock(&ci->i_unsafe_lock);
1240 	}
1241 
1242 	if (req->r_unsafe_dir) {
1243 		iput(req->r_unsafe_dir);
1244 		req->r_unsafe_dir = NULL;
1245 	}
1246 
1247 	complete_all(&req->r_safe_completion);
1248 
1249 	ceph_mdsc_put_request(req);
1250 }
1251 
1252 /*
1253  * Walk back up the dentry tree until we hit a dentry representing a
1254  * non-snapshot inode. We do this using the rcu_read_lock (which must be held
1255  * when calling this) to ensure that the objects won't disappear while we're
1256  * working with them. Once we hit a candidate dentry, we attempt to take a
1257  * reference to it, and return that as the result.
1258  */
1259 static struct inode *get_nonsnap_parent(struct dentry *dentry)
1260 {
1261 	struct inode *inode = NULL;
1262 
1263 	while (dentry && !IS_ROOT(dentry)) {
1264 		inode = d_inode_rcu(dentry);
1265 		if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
1266 			break;
1267 		dentry = dentry->d_parent;
1268 	}
1269 	if (inode)
1270 		inode = igrab(inode);
1271 	return inode;
1272 }
1273 
1274 /*
1275  * Choose mds to send request to next.  If there is a hint set in the
1276  * request (e.g., due to a prior forward hint from the mds), use that.
1277  * Otherwise, consult frag tree and/or caps to identify the
1278  * appropriate mds.  If all else fails, choose randomly.
1279  *
1280  * Called under mdsc->mutex.
1281  */
1282 static int __choose_mds(struct ceph_mds_client *mdsc,
1283 			struct ceph_mds_request *req,
1284 			bool *random)
1285 {
1286 	struct inode *inode;
1287 	struct ceph_inode_info *ci;
1288 	struct ceph_cap *cap;
1289 	int mode = req->r_direct_mode;
1290 	int mds = -1;
1291 	u32 hash = req->r_direct_hash;
1292 	bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
1293 	struct ceph_client *cl = mdsc->fsc->client;
1294 
1295 	if (random)
1296 		*random = false;
1297 
1298 	/*
1299 	 * is there a specific mds we should try?  ignore hint if we have
1300 	 * no session and the mds is not up (active or recovering).
1301 	 */
1302 	if (req->r_resend_mds >= 0 &&
1303 	    (__have_session(mdsc, req->r_resend_mds) ||
1304 	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
1305 		doutc(cl, "using resend_mds mds%d\n", req->r_resend_mds);
1306 		return req->r_resend_mds;
1307 	}
1308 
1309 	if (mode == USE_RANDOM_MDS)
1310 		goto random;
1311 
1312 	inode = NULL;
1313 	if (req->r_inode) {
1314 		if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
1315 			inode = req->r_inode;
1316 			ihold(inode);
1317 		} else {
1318 			/* req->r_dentry is non-null for LSSNAP request */
1319 			rcu_read_lock();
1320 			inode = get_nonsnap_parent(req->r_dentry);
1321 			rcu_read_unlock();
1322 			doutc(cl, "using snapdir's parent %p %llx.%llx\n",
1323 			      inode, ceph_vinop(inode));
1324 		}
1325 	} else if (req->r_dentry) {
1326 		/* ignore race with rename; old or new d_parent is okay */
1327 		struct dentry *parent;
1328 		struct inode *dir;
1329 
1330 		rcu_read_lock();
1331 		parent = READ_ONCE(req->r_dentry->d_parent);
1332 		dir = req->r_parent ? : d_inode_rcu(parent);
1333 
1334 		if (!dir || dir->i_sb != mdsc->fsc->sb) {
1335 			/*  not this fs or parent went negative */
1336 			inode = d_inode(req->r_dentry);
1337 			if (inode)
1338 				ihold(inode);
1339 		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
1340 			/* direct snapped/virtual snapdir requests
1341 			 * based on parent dir inode */
1342 			inode = get_nonsnap_parent(parent);
1343 			doutc(cl, "using nonsnap parent %p %llx.%llx\n",
1344 			      inode, ceph_vinop(inode));
1345 		} else {
1346 			/* dentry target */
1347 			inode = d_inode(req->r_dentry);
1348 			if (!inode || mode == USE_AUTH_MDS) {
1349 				/* dir + name */
1350 				inode = igrab(dir);
1351 				hash = ceph_dentry_hash(dir, req->r_dentry);
1352 				is_hash = true;
1353 			} else {
1354 				ihold(inode);
1355 			}
1356 		}
1357 		rcu_read_unlock();
1358 	}
1359 
1360 	if (!inode)
1361 		goto random;
1362 
1363 	doutc(cl, "%p %llx.%llx is_hash=%d (0x%x) mode %d\n", inode,
1364 	      ceph_vinop(inode), (int)is_hash, hash, mode);
1365 	ci = ceph_inode(inode);
1366 
1367 	if (is_hash && S_ISDIR(inode->i_mode)) {
1368 		struct ceph_inode_frag frag;
1369 		int found;
1370 
1371 		ceph_choose_frag(ci, hash, &frag, &found);
1372 		if (found) {
1373 			if (mode == USE_ANY_MDS && frag.ndist > 0) {
1374 				u8 r;
1375 
1376 				/* choose a random replica */
1377 				get_random_bytes(&r, 1);
1378 				r %= frag.ndist;
1379 				mds = frag.dist[r];
1380 				doutc(cl, "%p %llx.%llx frag %u mds%d (%d/%d)\n",
1381 				      inode, ceph_vinop(inode), frag.frag,
1382 				      mds, (int)r, frag.ndist);
1383 				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
1384 				    CEPH_MDS_STATE_ACTIVE &&
1385 				    !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
1386 					goto out;
1387 			}
1388 
1389 			/* since this file/dir wasn't known to be
1390 			 * replicated, look for the authoritative
1391 			 * mds instead. */
1392 			if (frag.mds >= 0) {
1393 				/* choose auth mds */
1394 				mds = frag.mds;
1395 				doutc(cl, "%p %llx.%llx frag %u mds%d (auth)\n",
1396 				      inode, ceph_vinop(inode), frag.frag, mds);
1397 				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
1398 				    CEPH_MDS_STATE_ACTIVE) {
1399 					if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
1400 								  mds))
1401 						goto out;
1402 				}
1403 			}
1404 			mode = USE_AUTH_MDS;
1405 		}
1406 	}
1407 
1408 	spin_lock(&ci->i_ceph_lock);
1409 	cap = NULL;
1410 	if (mode == USE_AUTH_MDS)
1411 		cap = ci->i_auth_cap;
1412 	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
1413 		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
1414 	if (!cap) {
1415 		spin_unlock(&ci->i_ceph_lock);
1416 		iput(inode);
1417 		goto random;
1418 	}
1419 	mds = cap->session->s_mds;
1420 	doutc(cl, "%p %llx.%llx mds%d (%scap %p)\n", inode,
1421 	      ceph_vinop(inode), mds,
1422 	      cap == ci->i_auth_cap ? "auth " : "", cap);
1423 	spin_unlock(&ci->i_ceph_lock);
1424 out:
1425 	iput(inode);
1426 	return mds;
1427 
1428 random:
1429 	if (random)
1430 		*random = true;
1431 
1432 	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
1433 	doutc(cl, "chose random mds%d\n", mds);
1434 	return mds;
1435 }
1436 
1437 
1438 /*
1439  * session messages
1440  */
1441 struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq)
1442 {
1443 	struct ceph_msg *msg;
1444 	struct ceph_mds_session_head *h;
1445 
1446 	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
1447 			   false);
1448 	if (!msg) {
1449 		pr_err("ENOMEM creating session %s msg\n",
1450 		       ceph_session_op_name(op));
1451 		return NULL;
1452 	}
1453 	h = msg->front.iov_base;
1454 	h->op = cpu_to_le32(op);
1455 	h->seq = cpu_to_le64(seq);
1456 
1457 	return msg;
1458 }
1459 
1460 static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
1461 #define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
1462 static int encode_supported_features(void **p, void *end)
1463 {
1464 	static const size_t count = ARRAY_SIZE(feature_bits);
1465 
1466 	if (count > 0) {
1467 		size_t i;
1468 		size_t size = FEATURE_BYTES(count);
1469 		unsigned long bit;
1470 
1471 		if (WARN_ON_ONCE(*p + 4 + size > end))
1472 			return -ERANGE;
1473 
1474 		ceph_encode_32(p, size);
1475 		memset(*p, 0, size);
1476 		for (i = 0; i < count; i++) {
1477 			bit = feature_bits[i];
1478 			((unsigned char *)(*p))[bit / 8] |= BIT(bit % 8);
1479 		}
1480 		*p += size;
1481 	} else {
1482 		if (WARN_ON_ONCE(*p + 4 > end))
1483 			return -ERANGE;
1484 
1485 		ceph_encode_32(p, 0);
1486 	}
1487 
1488 	return 0;
1489 }
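/*
 * Worked example of the FEATURE_BYTES sizing used above (values
 * assumed for illustration): if the highest bit in feature_bits is 17,
 * the bitmap must cover bits 0..17, i.e. 18 bits. DIV_ROUND_UP(18, 64)
 * gives 1 block of 64 bits, so FEATURE_BYTES yields 1 * 8 = 8 bytes.
 * The encoded blob is then a u32 length (8) followed by the 8-byte
 * bitmap with bits 0..17 set as appropriate.
 */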
1490 
1491 static const unsigned char metric_bits[] = CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED;
1492 #define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8)
1493 static int encode_metric_spec(void **p, void *end)
1494 {
1495 	static const size_t count = ARRAY_SIZE(metric_bits);
1496 
1497 	/* header */
1498 	if (WARN_ON_ONCE(*p + 2 > end))
1499 		return -ERANGE;
1500 
1501 	ceph_encode_8(p, 1); /* version */
1502 	ceph_encode_8(p, 1); /* compat */
1503 
1504 	if (count > 0) {
1505 		size_t i;
1506 		size_t size = METRIC_BYTES(count);
1507 
1508 		if (WARN_ON_ONCE(*p + 4 + 4 + size > end))
1509 			return -ERANGE;
1510 
1511 		/* metric spec info length */
1512 		ceph_encode_32(p, 4 + size);
1513 
1514 		/* metric spec */
1515 		ceph_encode_32(p, size);
1516 		memset(*p, 0, size);
1517 		for (i = 0; i < count; i++)
1518 			((unsigned char *)(*p))[metric_bits[i] / 8] |= BIT(metric_bits[i] % 8);
1519 		*p += size;
1520 	} else {
1521 		if (WARN_ON_ONCE(*p + 4 + 4 > end))
1522 			return -ERANGE;
1523 
1524 		/* metric spec info length */
1525 		ceph_encode_32(p, 4);
1526 		/* metric spec */
1527 		ceph_encode_32(p, 0);
1528 	}
1529 
1530 	return 0;
1531 }
1532 
1533 /*
1534  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
1535  * to include additional client metadata fields.
1536  */
1537 static struct ceph_msg *
1538 create_session_full_msg(struct ceph_mds_client *mdsc, int op, u64 seq)
1539 {
1540 	struct ceph_msg *msg;
1541 	struct ceph_mds_session_head *h;
1542 	int i;
1543 	int extra_bytes = 0;
1544 	int metadata_key_count = 0;
1545 	struct ceph_options *opt = mdsc->fsc->client->options;
1546 	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
1547 	struct ceph_client *cl = mdsc->fsc->client;
1548 	size_t size, count;
1549 	void *p, *end;
1550 	int ret;
1551 
1552 	const char* metadata[][2] = {
1553 		{"hostname", mdsc->nodename},
1554 		{"kernel_version", init_utsname()->release},
1555 		{"entity_id", opt->name ? : ""},
1556 		{"root", fsopt->server_path ? : "/"},
1557 		{NULL, NULL}
1558 	};
1559 
1560 	/* Calculate serialized length of metadata */
1561 	extra_bytes = 4;  /* map length */
1562 	for (i = 0; metadata[i][0]; ++i) {
1563 		extra_bytes += 8 + strlen(metadata[i][0]) +
1564 			strlen(metadata[i][1]);
1565 		metadata_key_count++;
1566 	}
1567 
1568 	/* supported feature */
1569 	size = 0;
1570 	count = ARRAY_SIZE(feature_bits);
1571 	if (count > 0)
1572 		size = FEATURE_BYTES(count);
1573 	extra_bytes += 4 + size;
1574 
1575 	/* metric spec */
1576 	size = 0;
1577 	count = ARRAY_SIZE(metric_bits);
1578 	if (count > 0)
1579 		size = METRIC_BYTES(count);
1580 	extra_bytes += 2 + 4 + 4 + size;
1581 
1582 	/* flags, mds auth caps and oldest_client_tid */
1583 	extra_bytes += 4 + 4 + 8;
1584 
1585 	/* Allocate the message */
1586 	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
1587 			   GFP_NOFS, false);
1588 	if (!msg) {
1589 		pr_err_client(cl, "ENOMEM creating session open msg\n");
1590 		return ERR_PTR(-ENOMEM);
1591 	}
1592 	p = msg->front.iov_base;
1593 	end = p + msg->front.iov_len;
1594 
1595 	h = p;
1596 	h->op = cpu_to_le32(op);
1597 	h->seq = cpu_to_le64(seq);
1598 
1599 	/*
1600 	 * Serialize client metadata into waiting buffer space, using
1601 	 * the format that userspace expects for map<string, string>
1602 	 *
1603 	 * ClientSession messages with metadata are v7
1604 	 */
1605 	msg->hdr.version = cpu_to_le16(7);
1606 	msg->hdr.compat_version = cpu_to_le16(1);
1607 
1608 	/* The write pointer, following the session_head structure */
1609 	p += sizeof(*h);
1610 
1611 	/* Number of entries in the map */
1612 	ceph_encode_32(&p, metadata_key_count);
1613 
1614 	/* Two length-prefixed strings for each entry in the map */
1615 	for (i = 0; metadata[i][0]; ++i) {
1616 		size_t const key_len = strlen(metadata[i][0]);
1617 		size_t const val_len = strlen(metadata[i][1]);
1618 
1619 		ceph_encode_32(&p, key_len);
1620 		memcpy(p, metadata[i][0], key_len);
1621 		p += key_len;
1622 		ceph_encode_32(&p, val_len);
1623 		memcpy(p, metadata[i][1], val_len);
1624 		p += val_len;
1625 	}
1626 
1627 	ret = encode_supported_features(&p, end);
1628 	if (ret) {
1629 		pr_err_client(cl, "encode_supported_features failed!\n");
1630 		ceph_msg_put(msg);
1631 		return ERR_PTR(ret);
1632 	}
1633 
1634 	ret = encode_metric_spec(&p, end);
1635 	if (ret) {
1636 		pr_err_client(cl, "encode_metric_spec failed!\n");
1637 		ceph_msg_put(msg);
1638 		return ERR_PTR(ret);
1639 	}
1640 
1641 	/* version == 5, flags */
1642 	ceph_encode_32(&p, 0);
1643 
1644 	/* version == 6, mds auth caps */
1645 	ceph_encode_32(&p, 0);
1646 
1647 	/* version == 7, oldest_client_tid */
1648 	ceph_encode_64(&p, mdsc->oldest_tid);
1649 
1650 	msg->front.iov_len = p - msg->front.iov_base;
1651 	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1652 
1653 	return msg;
1654 }
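/*
 * Sketch of the map<string, string> wire format produced above for a
 * single (hypothetical) entry {"hostname": "node1"}:
 *
 *	u32 count   = 1
 *	u32 key_len = 8, followed by the bytes "hostname"
 *	u32 val_len = 5, followed by the bytes "node1"
 *
 * which matches the 8 bytes of per-entry overhead (two u32 lengths)
 * plus key/value lengths accounted for in the extra_bytes calculation.
 */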
1655 
1656 /*
1657  * send session open request.
1658  *
1659  * called under mdsc->mutex
1660  */
1661 static int __open_session(struct ceph_mds_client *mdsc,
1662 			  struct ceph_mds_session *session)
1663 {
1664 	struct ceph_msg *msg;
1665 	int mstate;
1666 	int mds = session->s_mds;
1667 
1668 	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
1669 		return -EIO;
1670 
1671 	/* wait for mds to go active? */
1672 	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
1673 	doutc(mdsc->fsc->client, "open_session to mds%d (%s)\n", mds,
1674 	      ceph_mds_state_name(mstate));
1675 	session->s_state = CEPH_MDS_SESSION_OPENING;
1676 	session->s_renew_requested = jiffies;
1677 
1678 	/* send connect message */
1679 	msg = create_session_full_msg(mdsc, CEPH_SESSION_REQUEST_OPEN,
1680 				      session->s_seq);
1681 	if (IS_ERR(msg))
1682 		return PTR_ERR(msg);
1683 	ceph_con_send(&session->s_con, msg);
1684 	return 0;
1685 }
1686 
1687 /*
1688  * open sessions for any export targets for the given mds
1689  *
1690  * called under mdsc->mutex
1691  */
1692 static struct ceph_mds_session *
1693 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
1694 {
1695 	struct ceph_mds_session *session;
1696 	int ret;
1697 
1698 	session = __ceph_lookup_mds_session(mdsc, target);
1699 	if (!session) {
1700 		session = register_session(mdsc, target);
1701 		if (IS_ERR(session))
1702 			return session;
1703 	}
1704 	if (session->s_state == CEPH_MDS_SESSION_NEW ||
1705 	    session->s_state == CEPH_MDS_SESSION_CLOSING) {
1706 		ret = __open_session(mdsc, session);
1707 		if (ret)
1708 			return ERR_PTR(ret);
1709 	}
1710 
1711 	return session;
1712 }
1713 
1714 struct ceph_mds_session *
1715 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
1716 {
1717 	struct ceph_mds_session *session;
1718 	struct ceph_client *cl = mdsc->fsc->client;
1719 
1720 	doutc(cl, "to mds%d\n", target);
1721 
1722 	mutex_lock(&mdsc->mutex);
1723 	session = __open_export_target_session(mdsc, target);
1724 	mutex_unlock(&mdsc->mutex);
1725 
1726 	return session;
1727 }
1728 
1729 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
1730 					  struct ceph_mds_session *session)
1731 {
1732 	struct ceph_mds_info *mi;
1733 	struct ceph_mds_session *ts;
1734 	int i, mds = session->s_mds;
1735 	struct ceph_client *cl = mdsc->fsc->client;
1736 
1737 	if (mds >= mdsc->mdsmap->possible_max_rank)
1738 		return;
1739 
1740 	mi = &mdsc->mdsmap->m_info[mds];
1741 	doutc(cl, "for mds%d (%d targets)\n", session->s_mds,
1742 	      mi->num_export_targets);
1743 
1744 	for (i = 0; i < mi->num_export_targets; i++) {
1745 		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
1746 		ceph_put_mds_session(ts);
1747 	}
1748 }
1749 
1750 /*
1751  * session caps
1752  */
1753 
1754 static void detach_cap_releases(struct ceph_mds_session *session,
1755 				struct list_head *target)
1756 {
1757 	struct ceph_client *cl = session->s_mdsc->fsc->client;
1758 
1759 	lockdep_assert_held(&session->s_cap_lock);
1760 
1761 	list_splice_init(&session->s_cap_releases, target);
1762 	session->s_num_cap_releases = 0;
1763 	doutc(cl, "mds%d\n", session->s_mds);
1764 }
1765 
1766 static void dispose_cap_releases(struct ceph_mds_client *mdsc,
1767 				 struct list_head *dispose)
1768 {
1769 	while (!list_empty(dispose)) {
1770 		struct ceph_cap *cap;
1771 		/* zero out the in-progress message */
1772 		cap = list_first_entry(dispose, struct ceph_cap, session_caps);
1773 		list_del(&cap->session_caps);
1774 		ceph_put_cap(mdsc, cap);
1775 	}
1776 }
1777 
1778 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1779 				     struct ceph_mds_session *session)
1780 {
1781 	struct ceph_client *cl = mdsc->fsc->client;
1782 	struct ceph_mds_request *req;
1783 	struct rb_node *p;
1784 
1785 	doutc(cl, "mds%d\n", session->s_mds);
1786 	mutex_lock(&mdsc->mutex);
1787 	while (!list_empty(&session->s_unsafe)) {
1788 		req = list_first_entry(&session->s_unsafe,
1789 				       struct ceph_mds_request, r_unsafe_item);
1790 		pr_warn_ratelimited_client(cl, " dropping unsafe request %llu\n",
1791 					   req->r_tid);
1792 		if (req->r_target_inode)
1793 			mapping_set_error(req->r_target_inode->i_mapping, -EIO);
1794 		if (req->r_unsafe_dir)
1795 			mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
1796 		__unregister_request(mdsc, req);
1797 	}
1798 	/* zero r_attempts, so kick_requests() will re-send requests */
1799 	p = rb_first(&mdsc->request_tree);
1800 	while (p) {
1801 		req = rb_entry(p, struct ceph_mds_request, r_node);
1802 		p = rb_next(p);
1803 		if (req->r_session &&
1804 		    req->r_session->s_mds == session->s_mds)
1805 			req->r_attempts = 0;
1806 	}
1807 	mutex_unlock(&mdsc->mutex);
1808 }
1809 
1810 /*
1811  * Helper to safely iterate over all caps associated with a session, with
1812  * special care taken to handle a racing __ceph_remove_cap().
1813  *
1814  * Caller must hold session s_mutex.
1815  */
1816 int ceph_iterate_session_caps(struct ceph_mds_session *session,
1817 			      int (*cb)(struct inode *, int mds, void *),
1818 			      void *arg)
1819 {
1820 	struct ceph_client *cl = session->s_mdsc->fsc->client;
1821 	struct list_head *p;
1822 	struct ceph_cap *cap;
1823 	struct inode *inode, *last_inode = NULL;
1824 	struct ceph_cap *old_cap = NULL;
1825 	int ret;
1826 
1827 	doutc(cl, "%p mds%d\n", session, session->s_mds);
1828 	spin_lock(&session->s_cap_lock);
1829 	p = session->s_caps.next;
1830 	while (p != &session->s_caps) {
1831 		int mds;
1832 
1833 		cap = list_entry(p, struct ceph_cap, session_caps);
1834 		inode = igrab(&cap->ci->netfs.inode);
1835 		if (!inode) {
1836 			p = p->next;
1837 			continue;
1838 		}
1839 		session->s_cap_iterator = cap;
1840 		mds = cap->mds;
1841 		spin_unlock(&session->s_cap_lock);
1842 
1843 		if (last_inode) {
1844 			iput(last_inode);
1845 			last_inode = NULL;
1846 		}
1847 		if (old_cap) {
1848 			ceph_put_cap(session->s_mdsc, old_cap);
1849 			old_cap = NULL;
1850 		}
1851 
1852 		ret = cb(inode, mds, arg);
1853 		last_inode = inode;
1854 
1855 		spin_lock(&session->s_cap_lock);
1856 		p = p->next;
1857 		if (!cap->ci) {
1858 			doutc(cl, "finishing cap %p removal\n", cap);
1859 			BUG_ON(cap->session != session);
1860 			cap->session = NULL;
1861 			list_del_init(&cap->session_caps);
1862 			session->s_nr_caps--;
1863 			atomic64_dec(&session->s_mdsc->metric.total_caps);
1864 			if (cap->queue_release)
1865 				__ceph_queue_cap_release(session, cap);
1866 			else
1867 				old_cap = cap;  /* put_cap it w/o locks held */
1868 		}
1869 		if (ret < 0)
1870 			goto out;
1871 	}
1872 	ret = 0;
1873 out:
1874 	session->s_cap_iterator = NULL;
1875 	spin_unlock(&session->s_cap_lock);
1876 
1877 	iput(last_inode);
1878 	if (old_cap)
1879 		ceph_put_cap(session->s_mdsc, old_cap);
1880 
1881 	return ret;
1882 }
1883 
1884 static int remove_session_caps_cb(struct inode *inode, int mds, void *arg)
1885 {
1886 	struct ceph_inode_info *ci = ceph_inode(inode);
1887 	struct ceph_client *cl = ceph_inode_to_client(inode);
1888 	bool invalidate = false;
1889 	struct ceph_cap *cap;
1890 	int iputs = 0;
1891 
1892 	spin_lock(&ci->i_ceph_lock);
1893 	cap = __get_cap_for_mds(ci, mds);
1894 	if (cap) {
1895 		doutc(cl, " removing cap %p, ci is %p, inode is %p\n",
1896 		      cap, ci, &ci->netfs.inode);
1897 
1898 		iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
1899 	}
1900 	spin_unlock(&ci->i_ceph_lock);
1901 
1902 	if (cap)
1903 		wake_up_all(&ci->i_cap_wq);
1904 	if (invalidate)
1905 		ceph_queue_invalidate(inode);
1906 	while (iputs--)
1907 		iput(inode);
1908 	return 0;
1909 }
1910 
1911 /*
1912  * caller must hold session s_mutex
1913  */
1914 static void remove_session_caps(struct ceph_mds_session *session)
1915 {
1916 	struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1917 	struct super_block *sb = fsc->sb;
1918 	LIST_HEAD(dispose);
1919 
1920 	doutc(fsc->client, "on %p\n", session);
1921 	ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);
1922 
1923 	wake_up_all(&fsc->mdsc->cap_flushing_wq);
1924 
1925 	spin_lock(&session->s_cap_lock);
1926 	if (session->s_nr_caps > 0) {
1927 		struct inode *inode;
1928 		struct ceph_cap *cap, *prev = NULL;
1929 		struct ceph_vino vino;
1930 		/*
1931 		 * iterate_session_caps() skips inodes that are being
1932 		 * deleted, so we need to wait until deletions are complete.
1933 		 * __wait_on_freeing_inode() is designed for the job,
1934 		 * but it is not exported, so use the inode lookup function
1935 		 * to reach it.
1936 		 */
1937 		while (!list_empty(&session->s_caps)) {
1938 			cap = list_entry(session->s_caps.next,
1939 					 struct ceph_cap, session_caps);
1940 			if (cap == prev)
1941 				break;
1942 			prev = cap;
1943 			vino = cap->ci->i_vino;
1944 			spin_unlock(&session->s_cap_lock);
1945 
1946 			inode = ceph_find_inode(sb, vino);
1947 			iput(inode);
1948 
1949 			spin_lock(&session->s_cap_lock);
1950 		}
1951 	}
1952 
1953 	// drop cap expires (s_cap_lock itself is dropped below)
1954 	detach_cap_releases(session, &dispose);
1955 
1956 	BUG_ON(session->s_nr_caps > 0);
1957 	BUG_ON(!list_empty(&session->s_cap_flushing));
1958 	spin_unlock(&session->s_cap_lock);
1959 	dispose_cap_releases(session->s_mdsc, &dispose);
1960 }
1961 
1962 enum {
1963 	RECONNECT,
1964 	RENEWCAPS,
1965 	FORCE_RO,
1966 };
1967 
1968 /*
1969  * wake up any threads waiting on this session's caps.  if the cap is
1970  * old (didn't get renewed on the client reconnect), remove it now.
1971  * old (didn't get renewed on the client reconnect), invalidate it now.
1972  * caller must hold s_mutex.
1973  */
1974 static int wake_up_session_cb(struct inode *inode, int mds, void *arg)
1975 {
1976 	struct ceph_inode_info *ci = ceph_inode(inode);
1977 	unsigned long ev = (unsigned long)arg;
1978 
1979 	if (ev == RECONNECT) {
1980 		spin_lock(&ci->i_ceph_lock);
1981 		ci->i_wanted_max_size = 0;
1982 		ci->i_requested_max_size = 0;
1983 		spin_unlock(&ci->i_ceph_lock);
1984 	} else if (ev == RENEWCAPS) {
1985 		struct ceph_cap *cap;
1986 
1987 		spin_lock(&ci->i_ceph_lock);
1988 		cap = __get_cap_for_mds(ci, mds);
1989 		/* mds did not re-issue stale cap */
1990 		if (cap && cap->cap_gen < atomic_read(&cap->session->s_cap_gen))
1991 			cap->issued = cap->implemented = CEPH_CAP_PIN;
1992 		spin_unlock(&ci->i_ceph_lock);
1993 	} else if (ev == FORCE_RO) {
1994 	}
1995 	wake_up_all(&ci->i_cap_wq);
1996 	return 0;
1997 }
1998 
1999 static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
2000 {
2001 	struct ceph_client *cl = session->s_mdsc->fsc->client;
2002 
2003 	doutc(cl, "session %p mds%d\n", session, session->s_mds);
2004 	ceph_iterate_session_caps(session, wake_up_session_cb,
2005 				  (void *)(unsigned long)ev);
2006 }
2007 
2008 /*
2009  * Send periodic message to MDS renewing all currently held caps.  The
2010  * ack will reset the expiration for all caps from this session.
2011  *
2012  * caller holds s_mutex
2013  */
2014 static int send_renew_caps(struct ceph_mds_client *mdsc,
2015 			   struct ceph_mds_session *session)
2016 {
2017 	struct ceph_client *cl = mdsc->fsc->client;
2018 	struct ceph_msg *msg;
2019 	int state;
2020 
2021 	if (time_after_eq(jiffies, session->s_cap_ttl) &&
2022 	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
2023 		pr_info_client(cl, "mds%d caps stale\n", session->s_mds);
2024 	session->s_renew_requested = jiffies;
2025 
2026 	/* do not try to renew caps until a recovering mds has reconnected
2027 	 * with its clients. */
2028 	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
2029 	if (state < CEPH_MDS_STATE_RECONNECT) {
2030 		doutc(cl, "ignoring mds%d (%s)\n", session->s_mds,
2031 		      ceph_mds_state_name(state));
2032 		return 0;
2033 	}
2034 
2035 	doutc(cl, "to mds%d (%s)\n", session->s_mds,
2036 	      ceph_mds_state_name(state));
2037 	msg = create_session_full_msg(mdsc, CEPH_SESSION_REQUEST_RENEWCAPS,
2038 				      ++session->s_renew_seq);
2039 	if (IS_ERR(msg))
2040 		return PTR_ERR(msg);
2041 	ceph_con_send(&session->s_con, msg);
2042 	return 0;
2043 }
2044 
2045 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
2046 			     struct ceph_mds_session *session, u64 seq)
2047 {
2048 	struct ceph_client *cl = mdsc->fsc->client;
2049 	struct ceph_msg *msg;
2050 
2051 	doutc(cl, "to mds%d (%s) seq %lld\n", session->s_mds,
2052 	      ceph_session_state_name(session->s_state), seq);
2053 	msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
2054 	if (!msg)
2055 		return -ENOMEM;
2056 	ceph_con_send(&session->s_con, msg);
2057 	return 0;
2058 }
2059 
2060 
2061 /*
2062  * Note new cap ttl, and any transition from stale -> fresh.
2063  *
2064  * Called under session->s_mutex
2065  */
2066 static void renewed_caps(struct ceph_mds_client *mdsc,
2067 			 struct ceph_mds_session *session, int is_renew)
2068 {
2069 	struct ceph_client *cl = mdsc->fsc->client;
2070 	int was_stale;
2071 	int wake = 0;
2072 
2073 	spin_lock(&session->s_cap_lock);
2074 	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
2075 
2076 	session->s_cap_ttl = session->s_renew_requested +
2077 		mdsc->mdsmap->m_session_timeout*HZ;
2078 
2079 	if (was_stale) {
2080 		if (time_before(jiffies, session->s_cap_ttl)) {
2081 			pr_info_client(cl, "mds%d caps renewed\n",
2082 				       session->s_mds);
2083 			wake = 1;
2084 		} else {
2085 			pr_info_client(cl, "mds%d caps still stale\n",
2086 				       session->s_mds);
2087 		}
2088 	}
2089 	doutc(cl, "mds%d ttl now %lu, was %s, now %s\n", session->s_mds,
2090 	      session->s_cap_ttl, was_stale ? "stale" : "fresh",
2091 	      time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
2092 	spin_unlock(&session->s_cap_lock);
2093 
2094 	if (wake)
2095 		wake_up_session_caps(session, RENEWCAPS);
2096 }
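
/*
 * Worked example for the ttl arithmetic above (assuming the common
 * 60s session timeout): if the renew went out at s_renew_requested ==
 * J, the ack moves s_cap_ttl to J + 60*HZ.  The ttl is anchored to
 * when the renew was *requested*, not when the ack arrived, so a slow
 * round trip shortens the effective lease rather than extending it.
 */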
2097 
2098 /*
2099  * send a session close request
2100  */
2101 static int request_close_session(struct ceph_mds_session *session)
2102 {
2103 	struct ceph_client *cl = session->s_mdsc->fsc->client;
2104 	struct ceph_msg *msg;
2105 
2106 	doutc(cl, "mds%d state %s seq %lld\n", session->s_mds,
2107 	      ceph_session_state_name(session->s_state), session->s_seq);
2108 	msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
2109 				      session->s_seq);
2110 	if (!msg)
2111 		return -ENOMEM;
2112 	ceph_con_send(&session->s_con, msg);
2113 	return 1;
2114 }
2115 
2116 /*
2117  * Called with s_mutex held.
2118  */
2119 static int __close_session(struct ceph_mds_client *mdsc,
2120 			 struct ceph_mds_session *session)
2121 {
2122 	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
2123 		return 0;
2124 	session->s_state = CEPH_MDS_SESSION_CLOSING;
2125 	return request_close_session(session);
2126 }
2127 
2128 static bool drop_negative_children(struct dentry *dentry)
2129 {
2130 	struct dentry *child;
2131 	bool all_negative = true;
2132 
2133 	if (!d_is_dir(dentry))
2134 		goto out;
2135 
2136 	spin_lock(&dentry->d_lock);
2137 	hlist_for_each_entry(child, &dentry->d_children, d_sib) {
2138 		if (d_really_is_positive(child)) {
2139 			all_negative = false;
2140 			break;
2141 		}
2142 	}
2143 	spin_unlock(&dentry->d_lock);
2144 
2145 	if (all_negative)
2146 		shrink_dcache_parent(dentry);
2147 out:
2148 	return all_negative;
2149 }
2150 
2151 /*
2152  * Trim old(er) caps.
2153  *
2154  * Because we can't cache an inode without one or more caps, we do
2155  * this indirectly: if a cap is unused, we prune its aliases, at which
2156  * point the inode will hopefully get dropped too.
2157  *
2158  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
2159  * memory pressure from the MDS, though, so it needn't be perfect.
2160  */
2161 static int trim_caps_cb(struct inode *inode, int mds, void *arg)
2162 {
2163 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
2164 	struct ceph_client *cl = mdsc->fsc->client;
2165 	int *remaining = arg;
2166 	struct ceph_inode_info *ci = ceph_inode(inode);
2167 	int used, wanted, oissued, mine;
2168 	struct ceph_cap *cap;
2169 
2170 	if (*remaining <= 0)
2171 		return -1;
2172 
2173 	spin_lock(&ci->i_ceph_lock);
2174 	cap = __get_cap_for_mds(ci, mds);
2175 	if (!cap) {
2176 		spin_unlock(&ci->i_ceph_lock);
2177 		return 0;
2178 	}
2179 	mine = cap->issued | cap->implemented;
2180 	used = __ceph_caps_used(ci);
2181 	wanted = __ceph_caps_file_wanted(ci);
2182 	oissued = __ceph_caps_issued_other(ci, cap);
2183 
2184 	doutc(cl, "%p %llx.%llx cap %p mine %s oissued %s used %s wanted %s\n",
2185 	      inode, ceph_vinop(inode), cap, ceph_cap_string(mine),
2186 	      ceph_cap_string(oissued), ceph_cap_string(used),
2187 	      ceph_cap_string(wanted));
2188 	if (cap == ci->i_auth_cap) {
2189 		if (ci->i_dirty_caps || ci->i_flushing_caps ||
2190 		    !list_empty(&ci->i_cap_snaps))
2191 			goto out;
2192 		if ((used | wanted) & CEPH_CAP_ANY_WR)
2193 			goto out;
2194 		/* Note: it's possible that i_filelock_ref becomes non-zero
2195 		 * after dropping auth caps. It doesn't hurt because reply
2196 		 * of lock mds request will re-add auth caps. */
2197 		if (atomic_read(&ci->i_filelock_ref) > 0)
2198 			goto out;
2199 	}
2200 	/* The inode has cached pages, but it's no longer used.
2201 	 * We can safely drop it. */
2202 	if (S_ISREG(inode->i_mode) &&
2203 	    wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
2204 	    !(oissued & CEPH_CAP_FILE_CACHE)) {
2205 		used = 0;
2206 		oissued = 0;
2207 	}
2208 	if ((used | wanted) & ~oissued & mine)
2209 		goto out;   /* we need these caps */
2210 
2211 	if (oissued) {
2212 		/* we aren't the only cap.. just remove us */
2213 		ceph_remove_cap(mdsc, cap, true);
2214 		(*remaining)--;
2215 	} else {
2216 		struct dentry *dentry;
2217 		/* try dropping referring dentries */
2218 		spin_unlock(&ci->i_ceph_lock);
2219 		dentry = d_find_any_alias(inode);
2220 		if (dentry && drop_negative_children(dentry)) {
2221 			int count;
2222 			dput(dentry);
2223 			d_prune_aliases(inode);
2224 			count = atomic_read(&inode->i_count);
2225 			if (count == 1)
2226 				(*remaining)--;
2227 			doutc(cl, "%p %llx.%llx cap %p pruned, count now %d\n",
2228 			      inode, ceph_vinop(inode), cap, count);
2229 		} else {
2230 			dput(dentry);
2231 		}
2232 		return 0;
2233 	}
2234 
2235 out:
2236 	spin_unlock(&ci->i_ceph_lock);
2237 	return 0;
2238 }
2239 
2240 /*
2241  * Trim session cap count down to some max number.
2242  */
2243 int ceph_trim_caps(struct ceph_mds_client *mdsc,
2244 		   struct ceph_mds_session *session,
2245 		   int max_caps)
2246 {
2247 	struct ceph_client *cl = mdsc->fsc->client;
2248 	int trim_caps = session->s_nr_caps - max_caps;
2249 
2250 	doutc(cl, "mds%d start: %d / %d, trim %d\n", session->s_mds,
2251 	      session->s_nr_caps, max_caps, trim_caps);
2252 	if (trim_caps > 0) {
2253 		int remaining = trim_caps;
2254 
2255 		ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
2256 		doutc(cl, "mds%d done: %d / %d, trimmed %d\n",
2257 		      session->s_mds, session->s_nr_caps, max_caps,
2258 		      trim_caps - remaining);
2259 	}
2260 
2261 	ceph_flush_session_cap_releases(mdsc, session);
2262 	return 0;
2263 }
2264 
2265 static int check_caps_flush(struct ceph_mds_client *mdsc,
2266 			    u64 want_flush_tid)
2267 {
2268 	struct ceph_client *cl = mdsc->fsc->client;
2269 	int ret = 1;
2270 
2271 	spin_lock(&mdsc->cap_dirty_lock);
2272 	if (!list_empty(&mdsc->cap_flush_list)) {
2273 		struct ceph_cap_flush *cf =
2274 			list_first_entry(&mdsc->cap_flush_list,
2275 					 struct ceph_cap_flush, g_list);
2276 		if (cf->tid <= want_flush_tid) {
2277 			doutc(cl, "still flushing tid %llu <= %llu\n",
2278 			      cf->tid, want_flush_tid);
2279 			ret = 0;
2280 		}
2281 	}
2282 	spin_unlock(&mdsc->cap_dirty_lock);
2283 	return ret;
2284 }
2285 
2286 /*
2287  * flush all dirty inode data to disk.
2288  *
2289  * returns true if we've flushed through want_flush_tid
2290  * waits until we've flushed through want_flush_tid
2291 static void wait_caps_flush(struct ceph_mds_client *mdsc,
2292 			    u64 want_flush_tid)
2293 {
2294 	struct ceph_client *cl = mdsc->fsc->client;
2295 
2296 	doutc(cl, "want %llu\n", want_flush_tid);
2297 
2298 	wait_event(mdsc->cap_flushing_wq,
2299 		   check_caps_flush(mdsc, want_flush_tid));
2300 
2301 	doutc(cl, "ok, flushed thru %llu\n", want_flush_tid);
2302 }
2303 
2304 /*
2305  * called under s_mutex
2306  */
2307 static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
2308 				   struct ceph_mds_session *session)
2309 {
2310 	struct ceph_client *cl = mdsc->fsc->client;
2311 	struct ceph_msg *msg = NULL;
2312 	struct ceph_mds_cap_release *head;
2313 	struct ceph_mds_cap_item *item;
2314 	struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
2315 	struct ceph_cap *cap;
2316 	LIST_HEAD(tmp_list);
2317 	int num_cap_releases;
2318 	__le32	barrier, *cap_barrier;
2319 
2320 	down_read(&osdc->lock);
2321 	barrier = cpu_to_le32(osdc->epoch_barrier);
2322 	up_read(&osdc->lock);
2323 
2324 	spin_lock(&session->s_cap_lock);
2325 again:
2326 	list_splice_init(&session->s_cap_releases, &tmp_list);
2327 	num_cap_releases = session->s_num_cap_releases;
2328 	session->s_num_cap_releases = 0;
2329 	spin_unlock(&session->s_cap_lock);
2330 
2331 	while (!list_empty(&tmp_list)) {
2332 		if (!msg) {
2333 			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
2334 					PAGE_SIZE, GFP_NOFS, false);
2335 			if (!msg)
2336 				goto out_err;
2337 			head = msg->front.iov_base;
2338 			head->num = cpu_to_le32(0);
2339 			msg->front.iov_len = sizeof(*head);
2340 
2341 			msg->hdr.version = cpu_to_le16(2);
2342 			msg->hdr.compat_version = cpu_to_le16(1);
2343 		}
2344 
2345 		cap = list_first_entry(&tmp_list, struct ceph_cap,
2346 					session_caps);
2347 		list_del(&cap->session_caps);
2348 		num_cap_releases--;
2349 
2350 		head = msg->front.iov_base;
2351 		put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
2352 				   &head->num);
2353 		item = msg->front.iov_base + msg->front.iov_len;
2354 		item->ino = cpu_to_le64(cap->cap_ino);
2355 		item->cap_id = cpu_to_le64(cap->cap_id);
2356 		item->migrate_seq = cpu_to_le32(cap->mseq);
2357 		item->issue_seq = cpu_to_le32(cap->issue_seq);
2358 		msg->front.iov_len += sizeof(*item);
2359 
2360 		ceph_put_cap(mdsc, cap);
2361 
2362 		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
2363 			// Append cap_barrier field
2364 			cap_barrier = msg->front.iov_base + msg->front.iov_len;
2365 			*cap_barrier = barrier;
2366 			msg->front.iov_len += sizeof(*cap_barrier);
2367 
2368 			msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2369 			doutc(cl, "mds%d %p\n", session->s_mds, msg);
2370 			ceph_con_send(&session->s_con, msg);
2371 			msg = NULL;
2372 		}
2373 	}
2374 
2375 	BUG_ON(num_cap_releases != 0);
2376 
2377 	spin_lock(&session->s_cap_lock);
2378 	if (!list_empty(&session->s_cap_releases))
2379 		goto again;
2380 	spin_unlock(&session->s_cap_lock);
2381 
2382 	if (msg) {
2383 		// Append cap_barrier field
2384 		cap_barrier = msg->front.iov_base + msg->front.iov_len;
2385 		*cap_barrier = barrier;
2386 		msg->front.iov_len += sizeof(*cap_barrier);
2387 
2388 		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2389 		doutc(cl, "mds%d %p\n", session->s_mds, msg);
2390 		ceph_con_send(&session->s_con, msg);
2391 	}
2392 	return;
2393 out_err:
2394 	pr_err_client(cl, "mds%d, failed to allocate message\n",
2395 		      session->s_mds);
2396 	spin_lock(&session->s_cap_lock);
2397 	list_splice(&tmp_list, &session->s_cap_releases);
2398 	session->s_num_cap_releases += num_cap_releases;
2399 	spin_unlock(&session->s_cap_lock);
2400 }
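
/*
 * For reference, the CEPH_MSG_CLIENT_CAPRELEASE front assembled above
 * is laid out as (all fields little-endian):
 *
 *	struct ceph_mds_cap_release head;    // head.num = item count
 *	struct ceph_mds_cap_item items[];    // ino, cap_id, migrate_seq,
 *	                                     //   issue_seq per dropped cap
 *	__le32 barrier;                      // v2: osd epoch barrier
 *
 * A message is sent each time it fills with CEPH_CAPS_PER_RELEASE
 * items, and once more at the end for any remainder.
 */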
2401 
2402 static void ceph_cap_release_work(struct work_struct *work)
2403 {
2404 	struct ceph_mds_session *session =
2405 		container_of(work, struct ceph_mds_session, s_cap_release_work);
2406 
2407 	mutex_lock(&session->s_mutex);
2408 	if (session->s_state == CEPH_MDS_SESSION_OPEN ||
2409 	    session->s_state == CEPH_MDS_SESSION_HUNG)
2410 		ceph_send_cap_releases(session->s_mdsc, session);
2411 	mutex_unlock(&session->s_mutex);
2412 	ceph_put_mds_session(session);
2413 }
2414 
2415 void ceph_flush_session_cap_releases(struct ceph_mds_client *mdsc,
2416 				     struct ceph_mds_session *session)
2417 {
2418 	struct ceph_client *cl = mdsc->fsc->client;
2419 	if (mdsc->stopping)
2420 		return;
2421 
2422 	ceph_get_mds_session(session);
2423 	if (queue_work(mdsc->fsc->cap_wq,
2424 		       &session->s_cap_release_work)) {
2425 		doutc(cl, "cap release work queued\n");
2426 	} else {
2427 		ceph_put_mds_session(session);
2428 		doutc(cl, "failed to queue cap release work\n");
2429 	}
2430 }
2431 
2432 /*
2433  * caller holds session->s_cap_lock
2434  */
2435 void __ceph_queue_cap_release(struct ceph_mds_session *session,
2436 			      struct ceph_cap *cap)
2437 {
2438 	list_add_tail(&cap->session_caps, &session->s_cap_releases);
2439 	session->s_num_cap_releases++;
2440 
2441 	if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
2442 		ceph_flush_session_cap_releases(session->s_mdsc, session);
2443 }
2444 
2445 static void ceph_cap_reclaim_work(struct work_struct *work)
2446 {
2447 	struct ceph_mds_client *mdsc =
2448 		container_of(work, struct ceph_mds_client, cap_reclaim_work);
2449 	int ret = ceph_trim_dentries(mdsc);
2450 	if (ret == -EAGAIN)
2451 		ceph_queue_cap_reclaim_work(mdsc);
2452 }
2453 
2454 void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
2455 {
2456 	struct ceph_client *cl = mdsc->fsc->client;
2457 	if (mdsc->stopping)
2458 		return;
2459 
2460 	if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
2461 		doutc(cl, "caps reclaim work queued\n");
2462 	} else {
2463 		doutc(cl, "failed to queue caps reclaim work\n");
2464 	}
2465 }
2466 
2467 void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
2468 {
2469 	int val;
2470 	if (!nr)
2471 		return;
2472 	val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
2473 	if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
2474 		atomic_set(&mdsc->cap_reclaim_pending, 0);
2475 		ceph_queue_cap_reclaim_work(mdsc);
2476 	}
2477 }
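
/*
 * Worked example for the trigger above (using a hypothetical batch
 * size of 100 in place of CEPH_CAPS_PER_RELEASE): with
 * cap_reclaim_pending at 98, a call with nr == 5 gives val == 103 and
 * val % 100 == 3 < 5, i.e. the counter just crossed a multiple of the
 * batch size, so the reclaim work is queued and the counter reset.
 * A call that doesn't cross a boundary (say 50 -> 55: 55 % 100 == 55,
 * which is >= 5) leaves the work unqueued.
 */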
2478 
2479 void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc)
2480 {
2481 	struct ceph_client *cl = mdsc->fsc->client;
2482 	if (mdsc->stopping)
2483 		return;
2484 
2485 	if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_unlink_work)) {
2486 		doutc(cl, "caps unlink work queued\n");
2487 	} else {
2488 		doutc(cl, "failed to queue caps unlink work\n");
2489 	}
2490 }
2491 
2492 static void ceph_cap_unlink_work(struct work_struct *work)
2493 {
2494 	struct ceph_mds_client *mdsc =
2495 		container_of(work, struct ceph_mds_client, cap_unlink_work);
2496 	struct ceph_client *cl = mdsc->fsc->client;
2497 
2498 	doutc(cl, "begin\n");
2499 	spin_lock(&mdsc->cap_delay_lock);
2500 	while (!list_empty(&mdsc->cap_unlink_delay_list)) {
2501 		struct ceph_inode_info *ci;
2502 		struct inode *inode;
2503 
2504 		ci = list_first_entry(&mdsc->cap_unlink_delay_list,
2505 				      struct ceph_inode_info,
2506 				      i_cap_delay_list);
2507 		list_del_init(&ci->i_cap_delay_list);
2508 
2509 		inode = igrab(&ci->netfs.inode);
2510 		if (inode) {
2511 			spin_unlock(&mdsc->cap_delay_lock);
2512 			doutc(cl, "on %p %llx.%llx\n", inode,
2513 			      ceph_vinop(inode));
2514 			ceph_check_caps(ci, CHECK_CAPS_FLUSH);
2515 			iput(inode);
2516 			spin_lock(&mdsc->cap_delay_lock);
2517 		}
2518 	}
2519 	spin_unlock(&mdsc->cap_delay_lock);
2520 	doutc(cl, "done\n");
2521 }
2522 
2523 /*
2524  * requests
2525  */
2526 
2527 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
2528 				    struct inode *dir)
2529 {
2530 	struct ceph_inode_info *ci = ceph_inode(dir);
2531 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
2532 	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
2533 	size_t size = sizeof(struct ceph_mds_reply_dir_entry);
2534 	unsigned int num_entries;
2535 	int order;
2536 
2537 	spin_lock(&ci->i_ceph_lock);
2538 	num_entries = ci->i_files + ci->i_subdirs;
2539 	spin_unlock(&ci->i_ceph_lock);
2540 	num_entries = max(num_entries, 1U);
2541 	num_entries = min(num_entries, opt->max_readdir);
2542 
2543 	order = get_order(size * num_entries);
2544 	while (order >= 0) {
2545 		rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
2546 							     __GFP_NOWARN |
2547 							     __GFP_ZERO,
2548 							     order);
2549 		if (rinfo->dir_entries)
2550 			break;
2551 		order--;
2552 	}
2553 	if (!rinfo->dir_entries)
2554 		return -ENOMEM;
2555 
2556 	num_entries = (PAGE_SIZE << order) / size;
2557 	num_entries = min(num_entries, opt->max_readdir);
2558 
2559 	rinfo->dir_buf_size = PAGE_SIZE << order;
2560 	req->r_num_caps = num_entries + 1;
2561 	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
2562 	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
2563 	return 0;
2564 }
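
/*
 * Worked example for the sizing above (hypothetical numbers: 4K
 * pages, a 40-byte ceph_mds_reply_dir_entry, 1000 entries in the
 * directory): get_order(40 * 1000) == 4, so we first try a 64K
 * buffer, which holds 65536 / 40 == 1638 entries (clamped back to
 * opt->max_readdir).  If the order-4 allocation fails, the loop
 * retries at order 3, 2, ..., trading a smaller readdir chunk for
 * not failing the operation outright.
 */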
2565 
2566 /*
2567  * Create an mds request.
2568  */
2569 struct ceph_mds_request *
2570 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
2571 {
2572 	struct ceph_mds_request *req;
2573 
2574 	req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
2575 	if (!req)
2576 		return ERR_PTR(-ENOMEM);
2577 
2578 	mutex_init(&req->r_fill_mutex);
2579 	req->r_mdsc = mdsc;
2580 	req->r_started = jiffies;
2581 	req->r_start_latency = ktime_get();
2582 	req->r_resend_mds = -1;
2583 	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
2584 	INIT_LIST_HEAD(&req->r_unsafe_target_item);
2585 	req->r_fmode = -1;
2586 	req->r_feature_needed = -1;
2587 	kref_init(&req->r_kref);
2588 	RB_CLEAR_NODE(&req->r_node);
2589 	INIT_LIST_HEAD(&req->r_wait);
2590 	init_completion(&req->r_completion);
2591 	init_completion(&req->r_safe_completion);
2592 	INIT_LIST_HEAD(&req->r_unsafe_item);
2593 
2594 	ktime_get_coarse_real_ts64(&req->r_stamp);
2595 
2596 	req->r_op = op;
2597 	req->r_direct_mode = mode;
2598 	return req;
2599 }
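
/*
 * Typical caller pattern (illustrative sketch; see the real callers
 * in dir.c and file.c for full details):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP,
 *				       USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);
 *	req->r_num_caps = 2;
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	ceph_mdsc_put_request(req);
 */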
2600 
2601 /*
2602  * return oldest (lowest tid) request in the request tree, NULL if none.
2603  *
2604  * called under mdsc->mutex.
2605  */
2606 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
2607 {
2608 	if (RB_EMPTY_ROOT(&mdsc->request_tree))
2609 		return NULL;
2610 	return rb_entry(rb_first(&mdsc->request_tree),
2611 			struct ceph_mds_request, r_node);
2612 }
2613 
2614 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
2615 {
2616 	return mdsc->oldest_tid;
2617 }
2618 
2619 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
2620 static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
2621 {
2622 	struct inode *dir = req->r_parent;
2623 	struct dentry *dentry = req->r_dentry;
2624 	u8 *cryptbuf = NULL;
2625 	u32 len = 0;
2626 	int ret = 0;
2627 
2628 	/* only encode if we have parent and dentry */
2629 	if (!dir || !dentry)
2630 		goto success;
2631 
2632 	/* No-op unless this is encrypted */
2633 	if (!IS_ENCRYPTED(dir))
2634 		goto success;
2635 
2636 	ret = ceph_fscrypt_prepare_readdir(dir);
2637 	if (ret < 0)
2638 		return ERR_PTR(ret);
2639 
2640 	/* No key? Just ignore it. */
2641 	if (!fscrypt_has_encryption_key(dir))
2642 		goto success;
2643 
2644 	if (!fscrypt_fname_encrypted_size(dir, dentry->d_name.len, NAME_MAX,
2645 					  &len)) {
2646 		WARN_ON_ONCE(1);
2647 		return ERR_PTR(-ENAMETOOLONG);
2648 	}
2649 
2650 	/* No need to append altname if name is short enough */
2651 	if (len <= CEPH_NOHASH_NAME_MAX) {
2652 		len = 0;
2653 		goto success;
2654 	}
2655 
2656 	cryptbuf = kmalloc(len, GFP_KERNEL);
2657 	if (!cryptbuf)
2658 		return ERR_PTR(-ENOMEM);
2659 
2660 	ret = fscrypt_fname_encrypt(dir, &dentry->d_name, cryptbuf, len);
2661 	if (ret) {
2662 		kfree(cryptbuf);
2663 		return ERR_PTR(ret);
2664 	}
2665 success:
2666 	*plen = len;
2667 	return cryptbuf;
2668 }
2669 #else
2670 static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
2671 {
2672 	*plen = 0;
2673 	return NULL;
2674 }
2675 #endif
2676 
2677 /**
2678  * ceph_mdsc_build_path - build a path string to a given dentry
2679  * @mdsc: mds client
2680  * @dentry: dentry to which path should be built
2681  * @plen: returned length of string
2682  * @pbase: returned base inode number
2683  * @for_wire: is this path going to be sent to the MDS?
2684  *
2685  * Build a string that represents the path to the dentry. This is mostly called
2686  * for two different purposes:
2687  *
2688  * 1) we need to build a path string to send to the MDS (for_wire == true)
2689  * 2) we need a path string for local presentation (e.g. debugfs)
2690  *    (for_wire == false)
2691  *
2692  * The path is built in reverse, starting with the dentry. Walk back up toward
2693  * the root, building the path until the first non-snapped inode is reached
2694  * (for_wire) or the root inode is reached (!for_wire).
2695  *
2696  * Encode hidden .snap dirs as a double /, i.e.
2697  *   foo/.snap/bar -> foo//bar
2698  */
2699 char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
2700 			   int *plen, u64 *pbase, int for_wire)
2701 {
2702 	struct ceph_client *cl = mdsc->fsc->client;
2703 	struct dentry *cur;
2704 	struct inode *inode;
2705 	char *path;
2706 	int pos;
2707 	unsigned seq;
2708 	u64 base;
2709 
2710 	if (!dentry)
2711 		return ERR_PTR(-EINVAL);
2712 
2713 	path = __getname();
2714 	if (!path)
2715 		return ERR_PTR(-ENOMEM);
2716 retry:
2717 	pos = PATH_MAX - 1;
2718 	path[pos] = '\0';
2719 
2720 	seq = read_seqbegin(&rename_lock);
2721 	cur = dget(dentry);
2722 	for (;;) {
2723 		struct dentry *parent;
2724 
2725 		spin_lock(&cur->d_lock);
2726 		inode = d_inode(cur);
2727 		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
2728 			doutc(cl, "path+%d: %p SNAPDIR\n", pos, cur);
2729 			spin_unlock(&cur->d_lock);
2730 			parent = dget_parent(cur);
2731 		} else if (for_wire && inode && dentry != cur &&
2732 			   ceph_snap(inode) == CEPH_NOSNAP) {
2733 			spin_unlock(&cur->d_lock);
2734 			pos++; /* get rid of any prepended '/' */
2735 			break;
2736 		} else if (!for_wire || !IS_ENCRYPTED(d_inode(cur->d_parent))) {
2737 			pos -= cur->d_name.len;
2738 			if (pos < 0) {
2739 				spin_unlock(&cur->d_lock);
2740 				break;
2741 			}
2742 			memcpy(path + pos, cur->d_name.name, cur->d_name.len);
2743 			spin_unlock(&cur->d_lock);
2744 			parent = dget_parent(cur);
2745 		} else {
2746 			int len, ret;
2747 			char buf[NAME_MAX];
2748 
2749 			/*
2750 			 * Proactively copy name into buf, in case we need to
2751 			 * present it as-is.
2752 			 */
2753 			memcpy(buf, cur->d_name.name, cur->d_name.len);
2754 			len = cur->d_name.len;
2755 			spin_unlock(&cur->d_lock);
2756 			parent = dget_parent(cur);
2757 
2758 			ret = ceph_fscrypt_prepare_readdir(d_inode(parent));
2759 			if (ret < 0) {
2760 				dput(parent);
2761 				dput(cur);
2762 				return ERR_PTR(ret);
2763 			}
2764 
2765 			if (fscrypt_has_encryption_key(d_inode(parent))) {
2766 				len = ceph_encode_encrypted_fname(d_inode(parent),
2767 								  cur, buf);
2768 				if (len < 0) {
2769 					dput(parent);
2770 					dput(cur);
2771 					return ERR_PTR(len);
2772 				}
2773 			}
2774 			pos -= len;
2775 			if (pos < 0) {
2776 				dput(parent);
2777 				break;
2778 			}
2779 			memcpy(path + pos, buf, len);
2780 		}
2781 		dput(cur);
2782 		cur = parent;
2783 
2784 		/* Are we at the root? */
2785 		if (IS_ROOT(cur))
2786 			break;
2787 
2788 		/* Are we out of buffer? */
2789 		if (--pos < 0)
2790 			break;
2791 
2792 		path[pos] = '/';
2793 	}
2794 	inode = d_inode(cur);
2795 	base = inode ? ceph_ino(inode) : 0;
2796 	dput(cur);
2797 
2798 	if (read_seqretry(&rename_lock, seq))
2799 		goto retry;
2800 
2801 	if (pos < 0) {
2802 		/*
2803 		 * A rename didn't occur, but somehow we didn't end up where
2804 		 * we thought we would. Throw a warning and try again.
2805 		 */
2806 		pr_warn_client(cl, "did not end path lookup where expected (pos = %d)\n",
2807 			       pos);
2808 		goto retry;
2809 	}
2810 
2811 	*pbase = base;
2812 	*plen = PATH_MAX - 1 - pos;
2813 	doutc(cl, "on %p %d built %llx '%.*s'\n", dentry, d_count(dentry),
2814 	      base, *plen, path + pos);
2815 	return path + pos;
2816 }
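
/*
 * Illustrative usage (hypothetical caller): the returned pointer is
 * an offset into a __getname() buffer, so it must be released with
 * ceph_mdsc_free_path() rather than kfree():
 *
 *	path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &base, 1);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	// ... use path/pathlen/base ...
 *	ceph_mdsc_free_path(path, pathlen);
 */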
2817 
2818 static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
2819 			     struct inode *dir, const char **ppath, int *ppathlen,
2820 			     u64 *pino, bool *pfreepath, bool parent_locked)
2821 {
2822 	char *path;
2823 
2824 	rcu_read_lock();
2825 	if (!dir)
2826 		dir = d_inode_rcu(dentry->d_parent);
2827 	if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP &&
2828 	    !IS_ENCRYPTED(dir)) {
2829 		*pino = ceph_ino(dir);
2830 		rcu_read_unlock();
2831 		*ppath = dentry->d_name.name;
2832 		*ppathlen = dentry->d_name.len;
2833 		return 0;
2834 	}
2835 	rcu_read_unlock();
2836 	path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
2837 	if (IS_ERR(path))
2838 		return PTR_ERR(path);
2839 	*ppath = path;
2840 	*pfreepath = true;
2841 	return 0;
2842 }
2843 
2844 static int build_inode_path(struct inode *inode,
2845 			    const char **ppath, int *ppathlen, u64 *pino,
2846 			    bool *pfreepath)
2847 {
2848 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
2849 	struct dentry *dentry;
2850 	char *path;
2851 
2852 	if (ceph_snap(inode) == CEPH_NOSNAP) {
2853 		*pino = ceph_ino(inode);
2854 		*ppathlen = 0;
2855 		return 0;
2856 	}
2857 	dentry = d_find_alias(inode);
2858 	path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
2859 	dput(dentry);
2860 	if (IS_ERR(path))
2861 		return PTR_ERR(path);
2862 	*ppath = path;
2863 	*pfreepath = true;
2864 	return 0;
2865 }
2866 
2867 /*
2868  * request arguments may be specified via an inode *, a dentry *, or
2869  * an explicit ino+path.
2870  */
2871 static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
2872 				 struct dentry *rdentry, struct inode *rdiri,
2873 				 const char *rpath, u64 rino, const char **ppath,
2874 				 int *pathlen, u64 *ino, bool *freepath,
2875 				 bool parent_locked)
2876 {
2877 	struct ceph_client *cl = mdsc->fsc->client;
2878 	int r = 0;
2879 
2880 	if (rinode) {
2881 		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
2882 		doutc(cl, " inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
2883 		      ceph_snap(rinode));
2884 	} else if (rdentry) {
2885 		r = build_dentry_path(mdsc, rdentry, rdiri, ppath, pathlen, ino,
2886 					freepath, parent_locked);
2887 		doutc(cl, " dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, *ppath);
2888 	} else if (rpath || rino) {
2889 		*ino = rino;
2890 		*ppath = rpath;
2891 		*pathlen = rpath ? strlen(rpath) : 0;
2892 		doutc(cl, " path %.*s\n", *pathlen, rpath);
2893 	}
2894 
2895 	return r;
2896 }
2897 
2898 static void encode_mclientrequest_tail(void **p,
2899 				       const struct ceph_mds_request *req)
2900 {
2901 	struct ceph_timespec ts;
2902 	int i;
2903 
2904 	ceph_encode_timespec64(&ts, &req->r_stamp);
2905 	ceph_encode_copy(p, &ts, sizeof(ts));
2906 
2907 	/* v4: gid_list */
2908 	ceph_encode_32(p, req->r_cred->group_info->ngroups);
2909 	for (i = 0; i < req->r_cred->group_info->ngroups; i++)
2910 		ceph_encode_64(p, from_kgid(&init_user_ns,
2911 					    req->r_cred->group_info->gid[i]));
2912 
2913 	/* v5: altname */
2914 	ceph_encode_32(p, req->r_altname_len);
2915 	ceph_encode_copy(p, req->r_altname, req->r_altname_len);
2916 
2917 	/* v6: fscrypt_auth and fscrypt_file */
2918 	if (req->r_fscrypt_auth) {
2919 		u32 authlen = ceph_fscrypt_auth_len(req->r_fscrypt_auth);
2920 
2921 		ceph_encode_32(p, authlen);
2922 		ceph_encode_copy(p, req->r_fscrypt_auth, authlen);
2923 	} else {
2924 		ceph_encode_32(p, 0);
2925 	}
2926 	if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags)) {
2927 		ceph_encode_32(p, sizeof(__le64));
2928 		ceph_encode_64(p, req->r_fscrypt_file);
2929 	} else {
2930 		ceph_encode_32(p, 0);
2931 	}
2932 }
2933 
2934 static inline u16 mds_supported_head_version(struct ceph_mds_session *session)
2935 {
2936 	if (!test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD, &session->s_features))
2937 		return 1;
2938 
2939 	if (!test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features))
2940 		return 2;
2941 
2942 	return CEPH_MDS_REQUEST_HEAD_VERSION;
2943 }
2944 
2945 static struct ceph_mds_request_head_legacy *
2946 find_legacy_request_head(void *p, u64 features)
2947 {
2948 	bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
2949 	struct ceph_mds_request_head_old *ohead;
2950 
2951 	if (legacy)
2952 		return (struct ceph_mds_request_head_legacy *)p;
2953 	ohead = (struct ceph_mds_request_head_old *)p;
2954 	return (struct ceph_mds_request_head_legacy *)&ohead->oldest_client_tid;
2955 }
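
/*
 * Layout note for the helper above: ceph_mds_request_head_old is the
 * legacy head with a __le16 version field prepended, so the two
 * layouts coincide from oldest_client_tid onward:
 *
 *	legacy:          [oldest_client_tid][...]
 *	old (v4+ head):  [version][oldest_client_tid][...]
 *
 * Returning a pointer into the middle of the newer head lets callers
 * fill the shared fields through the legacy view either way.
 */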
2956 
2957 /*
2958  * called under mdsc->mutex
2959  */
2960 static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
2961 					       struct ceph_mds_request *req,
2962 					       bool drop_cap_releases)
2963 {
2964 	int mds = session->s_mds;
2965 	struct ceph_mds_client *mdsc = session->s_mdsc;
2966 	struct ceph_client *cl = mdsc->fsc->client;
2967 	struct ceph_msg *msg;
2968 	struct ceph_mds_request_head_legacy *lhead;
2969 	const char *path1 = NULL;
2970 	const char *path2 = NULL;
2971 	u64 ino1 = 0, ino2 = 0;
2972 	int pathlen1 = 0, pathlen2 = 0;
2973 	bool freepath1 = false, freepath2 = false;
2974 	struct dentry *old_dentry = NULL;
2975 	int len;
2976 	u16 releases;
2977 	void *p, *end;
2978 	int ret;
2979 	bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
2980 	u16 request_head_version = mds_supported_head_version(session);
2981 	kuid_t caller_fsuid = req->r_cred->fsuid;
2982 	kgid_t caller_fsgid = req->r_cred->fsgid;
2983 
2984 	ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry,
2985 			      req->r_parent, req->r_path1, req->r_ino1.ino,
2986 			      &path1, &pathlen1, &ino1, &freepath1,
2987 			      test_bit(CEPH_MDS_R_PARENT_LOCKED,
2988 					&req->r_req_flags));
2989 	if (ret < 0) {
2990 		msg = ERR_PTR(ret);
2991 		goto out;
2992 	}
2993 
2994 	/* If r_old_dentry is set, then assume that its parent is locked */
2995 	if (req->r_old_dentry &&
2996 	    !(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
2997 		old_dentry = req->r_old_dentry;
2998 	ret = set_request_path_attr(mdsc, NULL, old_dentry,
2999 			      req->r_old_dentry_dir,
3000 			      req->r_path2, req->r_ino2.ino,
3001 			      &path2, &pathlen2, &ino2, &freepath2, true);
3002 	if (ret < 0) {
3003 		msg = ERR_PTR(ret);
3004 		goto out_free1;
3005 	}
3006 
3007 	req->r_altname = get_fscrypt_altname(req, &req->r_altname_len);
3008 	if (IS_ERR(req->r_altname)) {
3009 		msg = ERR_CAST(req->r_altname);
3010 		req->r_altname = NULL;
3011 		goto out_free2;
3012 	}
3013 
3014 	/*
3015 	 * Old cephs that lack the 32bit retry/fwd feature copy the
3016 	 * raw memory directly when decoding requests, while new
3017 	 * cephs decode the head based on its version member, so we
3018 	 * need to make sure the encoding stays compatible with both
3019 	 * of them.
3020 	 */
3021 	if (legacy)
3022 		len = sizeof(struct ceph_mds_request_head_legacy);
3023 	else if (request_head_version == 1)
3024 		len = sizeof(struct ceph_mds_request_head_old);
3025 	else if (request_head_version == 2)
3026 		len = offsetofend(struct ceph_mds_request_head, ext_num_fwd);
3027 	else
3028 		len = sizeof(struct ceph_mds_request_head);
3029 
3030 	/* filepaths */
3031 	len += 2 * (1 + sizeof(u32) + sizeof(u64));
3032 	len += pathlen1 + pathlen2;
3033 
3034 	/* cap releases */
3035 	len += sizeof(struct ceph_mds_request_release) *
3036 		(!!req->r_inode_drop + !!req->r_dentry_drop +
3037 		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
3038 
3039 	if (req->r_dentry_drop)
3040 		len += pathlen1;
3041 	if (req->r_old_dentry_drop)
3042 		len += pathlen2;
3043 
3044 	/* MClientRequest tail */
3045 
3046 	/* req->r_stamp */
3047 	len += sizeof(struct ceph_timespec);
3048 
3049 	/* gid list */
3050 	len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
3051 
3052 	/* alternate name */
3053 	len += sizeof(u32) + req->r_altname_len;
3054 
3055 	/* fscrypt_auth */
3056 	len += sizeof(u32); // fscrypt_auth
3057 	if (req->r_fscrypt_auth)
3058 		len += ceph_fscrypt_auth_len(req->r_fscrypt_auth);
3059 
3060 	/* fscrypt_file */
3061 	len += sizeof(u32);
3062 	if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags))
3063 		len += sizeof(__le64);
3064 
3065 	msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
3066 	if (!msg) {
3067 		msg = ERR_PTR(-ENOMEM);
3068 		goto out_free2;
3069 	}
3070 
3071 	msg->hdr.tid = cpu_to_le64(req->r_tid);
3072 
3073 	lhead = find_legacy_request_head(msg->front.iov_base,
3074 					 session->s_con.peer_features);
3075 
3076 	if ((req->r_mnt_idmap != &nop_mnt_idmap) &&
3077 	    !test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features)) {
3078 		WARN_ON_ONCE(!IS_CEPH_MDS_OP_NEWINODE(req->r_op));
3079 
3080 		if (enable_unsafe_idmap) {
3081 			pr_warn_once_client(cl,
3082 				"idmapped mount is used and CEPHFS_FEATURE_HAS_OWNER_UIDGID"
3083 				" is not supported by MDS. UID/GID-based restrictions may"
3084 				" not work properly.\n");
3085 
3086 			caller_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
3087 						   VFSUIDT_INIT(req->r_cred->fsuid));
3088 			caller_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
3089 						   VFSGIDT_INIT(req->r_cred->fsgid));
3090 		} else {
3091 			pr_err_ratelimited_client(cl,
3092 				"idmapped mount is used and CEPHFS_FEATURE_HAS_OWNER_UIDGID"
3093 				" is not supported by MDS. Fail request with -EIO.\n");
3094 
3095 			ret = -EIO;
3096 			goto out_err;
3097 		}
3098 	}
3099 
3100 	/*
3101 	 * The ceph_mds_request_head_legacy didn't contain a version field, and
3102 	 * one was added when we moved the message version from 3->4.
3103 	 */
3104 	if (legacy) {
3105 		msg->hdr.version = cpu_to_le16(3);
3106 		p = msg->front.iov_base + sizeof(*lhead);
3107 	} else if (request_head_version == 1) {
3108 		struct ceph_mds_request_head_old *ohead = msg->front.iov_base;
3109 
3110 		msg->hdr.version = cpu_to_le16(4);
3111 		ohead->version = cpu_to_le16(1);
3112 		p = msg->front.iov_base + sizeof(*ohead);
3113 	} else if (request_head_version == 2) {
3114 		struct ceph_mds_request_head *nhead = msg->front.iov_base;
3115 
3116 		msg->hdr.version = cpu_to_le16(6);
3117 		nhead->version = cpu_to_le16(2);
3118 
3119 		p = msg->front.iov_base + offsetofend(struct ceph_mds_request_head, ext_num_fwd);
3120 	} else {
3121 		struct ceph_mds_request_head *nhead = msg->front.iov_base;
3122 		kuid_t owner_fsuid;
3123 		kgid_t owner_fsgid;
3124 
3125 		msg->hdr.version = cpu_to_le16(6);
3126 		nhead->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
3127 		nhead->struct_len = cpu_to_le32(sizeof(struct ceph_mds_request_head));
3128 
3129 		if (IS_CEPH_MDS_OP_NEWINODE(req->r_op)) {
3130 			owner_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
3131 						VFSUIDT_INIT(req->r_cred->fsuid));
3132 			owner_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
3133 						VFSGIDT_INIT(req->r_cred->fsgid));
3134 			nhead->owner_uid = cpu_to_le32(from_kuid(&init_user_ns, owner_fsuid));
3135 			nhead->owner_gid = cpu_to_le32(from_kgid(&init_user_ns, owner_fsgid));
3136 		} else {
3137 			nhead->owner_uid = cpu_to_le32(-1);
3138 			nhead->owner_gid = cpu_to_le32(-1);
3139 		}
3140 
3141 		p = msg->front.iov_base + sizeof(*nhead);
3142 	}
3143 
3144 	end = msg->front.iov_base + msg->front.iov_len;
3145 
3146 	lhead->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
3147 	lhead->op = cpu_to_le32(req->r_op);
3148 	lhead->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
3149 						  caller_fsuid));
3150 	lhead->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
3151 						  caller_fsgid));
3152 	lhead->ino = cpu_to_le64(req->r_deleg_ino);
3153 	lhead->args = req->r_args;
3154 
3155 	ceph_encode_filepath(&p, end, ino1, path1);
3156 	ceph_encode_filepath(&p, end, ino2, path2);
3157 
3158 	/* make note of release offset, in case we need to replay */
3159 	req->r_request_release_offset = p - msg->front.iov_base;
3160 
3161 	/* cap releases */
3162 	releases = 0;
3163 	if (req->r_inode_drop)
3164 		releases += ceph_encode_inode_release(&p,
3165 		      req->r_inode ? req->r_inode : d_inode(req->r_dentry),
3166 		      mds, req->r_inode_drop, req->r_inode_unless,
3167 		      req->r_op == CEPH_MDS_OP_READDIR);
3168 	if (req->r_dentry_drop) {
3169 		ret = ceph_encode_dentry_release(&p, req->r_dentry,
3170 				req->r_parent, mds, req->r_dentry_drop,
3171 				req->r_dentry_unless);
3172 		if (ret < 0)
3173 			goto out_err;
3174 		releases += ret;
3175 	}
3176 	if (req->r_old_dentry_drop) {
3177 		ret = ceph_encode_dentry_release(&p, req->r_old_dentry,
3178 				req->r_old_dentry_dir, mds,
3179 				req->r_old_dentry_drop,
3180 				req->r_old_dentry_unless);
3181 		if (ret < 0)
3182 			goto out_err;
3183 		releases += ret;
3184 	}
3185 	if (req->r_old_inode_drop)
3186 		releases += ceph_encode_inode_release(&p,
3187 		      d_inode(req->r_old_dentry),
3188 		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
3189 
3190 	if (drop_cap_releases) {
3191 		releases = 0;
3192 		p = msg->front.iov_base + req->r_request_release_offset;
3193 	}
3194 
3195 	lhead->num_releases = cpu_to_le16(releases);
3196 
3197 	encode_mclientrequest_tail(&p, req);
3198 
3199 	if (WARN_ON_ONCE(p > end)) {
3200 		ceph_msg_put(msg);
3201 		msg = ERR_PTR(-ERANGE);
3202 		goto out_free2;
3203 	}
3204 
3205 	msg->front.iov_len = p - msg->front.iov_base;
3206 	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
3207 
3208 	if (req->r_pagelist) {
3209 		struct ceph_pagelist *pagelist = req->r_pagelist;
3210 		ceph_msg_data_add_pagelist(msg, pagelist);
3211 		msg->hdr.data_len = cpu_to_le32(pagelist->length);
3212 	} else {
3213 		msg->hdr.data_len = 0;
3214 	}
3215 
3216 	msg->hdr.data_off = cpu_to_le16(0);
3217 
3218 out_free2:
3219 	if (freepath2)
3220 		ceph_mdsc_free_path((char *)path2, pathlen2);
3221 out_free1:
3222 	if (freepath1)
3223 		ceph_mdsc_free_path((char *)path1, pathlen1);
3224 out:
3225 	return msg;
3226 out_err:
3227 	ceph_msg_put(msg);
3228 	msg = ERR_PTR(ret);
3229 	goto out_free2;
3230 }
3231 
3232 /*
3233  * called under mdsc->mutex if error, under no mutex if
3234  * success.
3235  */
3236 static void complete_request(struct ceph_mds_client *mdsc,
3237 			     struct ceph_mds_request *req)
3238 {
3239 	req->r_end_latency = ktime_get();
3240 
3241 	if (req->r_callback)
3242 		req->r_callback(mdsc, req);
3243 	complete_all(&req->r_completion);
3244 }
3245 
3246 /*
3247  * called under mdsc->mutex
3248  */
3249 static int __prepare_send_request(struct ceph_mds_session *session,
3250 				  struct ceph_mds_request *req,
3251 				  bool drop_cap_releases)
3252 {
3253 	int mds = session->s_mds;
3254 	struct ceph_mds_client *mdsc = session->s_mdsc;
3255 	struct ceph_client *cl = mdsc->fsc->client;
3256 	struct ceph_mds_request_head_legacy *lhead;
3257 	struct ceph_mds_request_head *nhead;
3258 	struct ceph_msg *msg;
3259 	int flags = 0, old_max_retry;
3260 	bool old_version = !test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD,
3261 				     &session->s_features);
3262 
3263 	/*
3264 	 * Avoid infinite retrying after overflow. The client increases
3265 	 * the retry count on each attempt, and an old-version MDS has
3266 	 * only an 8-bit retry counter, so limit it to at most 256 retries.
3267 	 */
3268 	if (req->r_attempts) {
3269 		old_max_retry = sizeof_field(struct ceph_mds_request_head_old,
3270 					     num_retry);
3271 		old_max_retry = 1 << (old_max_retry * BITS_PER_BYTE);
3272 		if ((old_version && req->r_attempts >= old_max_retry) ||
3273 		    ((uint32_t)req->r_attempts >= U32_MAX)) {
3274 			pr_warn_ratelimited_client(cl, "request tid %llu seq overflow\n",
3275 						   req->r_tid);
3276 			return -EMULTIHOP;
3277 		}
3278 	}
3279 
3280 	req->r_attempts++;
3281 	if (req->r_inode) {
3282 		struct ceph_cap *cap =
3283 			ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
3284 
3285 		if (cap)
3286 			req->r_sent_on_mseq = cap->mseq;
3287 		else
3288 			req->r_sent_on_mseq = -1;
3289 	}
3290 	doutc(cl, "%p tid %lld %s (attempt %d)\n", req, req->r_tid,
3291 	      ceph_mds_op_name(req->r_op), req->r_attempts);
3292 
3293 	if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3294 		void *p;
3295 
3296 		/*
3297 		 * Replay.  Do not regenerate message (and rebuild
3298 		 * paths, etc.); just use the original message.
3299 		 * Rebuilding paths will break for renames because
3300 		 * d_move mangles the src name.
3301 		 */
3302 		msg = req->r_request;
3303 		lhead = find_legacy_request_head(msg->front.iov_base,
3304 						 session->s_con.peer_features);
3305 
3306 		flags = le32_to_cpu(lhead->flags);
3307 		flags |= CEPH_MDS_FLAG_REPLAY;
3308 		lhead->flags = cpu_to_le32(flags);
3309 
3310 		if (req->r_target_inode)
3311 			lhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
3312 
3313 		lhead->num_retry = req->r_attempts - 1;
3314 		if (!old_version) {
3315 			nhead = (struct ceph_mds_request_head*)msg->front.iov_base;
3316 			nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1);
3317 		}
3318 
3319 		/* remove cap/dentry releases from message */
3320 		lhead->num_releases = 0;
3321 
3322 		p = msg->front.iov_base + req->r_request_release_offset;
3323 		encode_mclientrequest_tail(&p, req);
3324 
3325 		msg->front.iov_len = p - msg->front.iov_base;
3326 		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
3327 		return 0;
3328 	}
3329 
3330 	if (req->r_request) {
3331 		ceph_msg_put(req->r_request);
3332 		req->r_request = NULL;
3333 	}
3334 	msg = create_request_message(session, req, drop_cap_releases);
3335 	if (IS_ERR(msg)) {
3336 		req->r_err = PTR_ERR(msg);
3337 		return PTR_ERR(msg);
3338 	}
3339 	req->r_request = msg;
3340 
3341 	lhead = find_legacy_request_head(msg->front.iov_base,
3342 					 session->s_con.peer_features);
3343 	lhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
3344 	if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3345 		flags |= CEPH_MDS_FLAG_REPLAY;
3346 	if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
3347 		flags |= CEPH_MDS_FLAG_ASYNC;
3348 	if (req->r_parent)
3349 		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
3350 	lhead->flags = cpu_to_le32(flags);
3351 	lhead->num_fwd = req->r_num_fwd;
3352 	lhead->num_retry = req->r_attempts - 1;
3353 	if (!old_version) {
3354 		nhead = (struct ceph_mds_request_head*)msg->front.iov_base;
3355 		nhead->ext_num_fwd = cpu_to_le32(req->r_num_fwd);
3356 		nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1);
3357 	}
3358 
3359 	doutc(cl, " r_parent = %p\n", req->r_parent);
3360 	return 0;
3361 }
3362 
3363 /*
3364  * called under mdsc->mutex
3365  */
3366 static int __send_request(struct ceph_mds_session *session,
3367 			  struct ceph_mds_request *req,
3368 			  bool drop_cap_releases)
3369 {
3370 	int err;
3371 
3372 	err = __prepare_send_request(session, req, drop_cap_releases);
3373 	if (!err) {
3374 		ceph_msg_get(req->r_request);
3375 		ceph_con_send(&session->s_con, req->r_request);
3376 	}
3377 
3378 	return err;
3379 }
3380 
3381 /*
3382  * send request, or put it on the appropriate wait list.
3383  */
3384 static void __do_request(struct ceph_mds_client *mdsc,
3385 			struct ceph_mds_request *req)
3386 {
3387 	struct ceph_client *cl = mdsc->fsc->client;
3388 	struct ceph_mds_session *session = NULL;
3389 	int mds = -1;
3390 	int err = 0;
3391 	bool random;
3392 
3393 	if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
3394 		if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
3395 			__unregister_request(mdsc, req);
3396 		return;
3397 	}
3398 
3399 	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
3400 		doutc(cl, "metadata corrupted\n");
3401 		err = -EIO;
3402 		goto finish;
3403 	}
3404 	if (req->r_timeout &&
3405 	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
3406 		doutc(cl, "timed out\n");
3407 		err = -ETIMEDOUT;
3408 		goto finish;
3409 	}
3410 	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
3411 		doutc(cl, "forced umount\n");
3412 		err = -EIO;
3413 		goto finish;
3414 	}
3415 	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
3416 		if (mdsc->mdsmap_err) {
3417 			err = mdsc->mdsmap_err;
3418 			doutc(cl, "mdsmap err %d\n", err);
3419 			goto finish;
3420 		}
3421 		if (mdsc->mdsmap->m_epoch == 0) {
3422 			doutc(cl, "no mdsmap, waiting for map\n");
3423 			list_add(&req->r_wait, &mdsc->waiting_for_map);
3424 			return;
3425 		}
3426 		if (!(mdsc->fsc->mount_options->flags &
3427 		      CEPH_MOUNT_OPT_MOUNTWAIT) &&
3428 		    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
3429 			err = -EHOSTUNREACH;
3430 			goto finish;
3431 		}
3432 	}
3433 
3434 	put_request_session(req);
3435 
3436 	mds = __choose_mds(mdsc, req, &random);
3437 	if (mds < 0 ||
3438 	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
3439 		if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
3440 			err = -EJUKEBOX;
3441 			goto finish;
3442 		}
3443 		doutc(cl, "no mds or not active, waiting for map\n");
3444 		list_add(&req->r_wait, &mdsc->waiting_for_map);
3445 		return;
3446 	}
3447 
3448 	/* get, open session */
3449 	session = __ceph_lookup_mds_session(mdsc, mds);
3450 	if (!session) {
3451 		session = register_session(mdsc, mds);
3452 		if (IS_ERR(session)) {
3453 			err = PTR_ERR(session);
3454 			goto finish;
3455 		}
3456 	}
3457 	req->r_session = ceph_get_mds_session(session);
3458 
3459 	doutc(cl, "mds%d session %p state %s\n", mds, session,
3460 	      ceph_session_state_name(session->s_state));
3461 
3462 	/*
3463 	 * Old ceph MDSs will crash when they see unknown OPs
3464 	 */
3465 	if (req->r_feature_needed > 0 &&
3466 	    !test_bit(req->r_feature_needed, &session->s_features)) {
3467 		err = -EOPNOTSUPP;
3468 		goto out_session;
3469 	}
3470 
3471 	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
3472 	    session->s_state != CEPH_MDS_SESSION_HUNG) {
3473 		/*
3474 		 * We cannot queue async requests since the caps and delegated
3475 		 * inodes are bound to the session. Just return -EJUKEBOX and
3476 		 * let the caller retry a sync request in that case.
3477 		 */
3478 		if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
3479 			err = -EJUKEBOX;
3480 			goto out_session;
3481 		}
3482 
3483 		/*
3484 		 * If the session has been REJECTED, then return a hard error,
3485 		 * unless it's a CLEANRECOVER mount, in which case we'll queue
3486 		 * it to the mdsc queue.
3487 		 */
3488 		if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
3489 			if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER))
3490 				list_add(&req->r_wait, &mdsc->waiting_for_map);
3491 			else
3492 				err = -EACCES;
3493 			goto out_session;
3494 		}
3495 
3496 		if (session->s_state == CEPH_MDS_SESSION_NEW ||
3497 		    session->s_state == CEPH_MDS_SESSION_CLOSING) {
3498 			err = __open_session(mdsc, session);
3499 			if (err)
3500 				goto out_session;
3501 			/* retry the same mds later */
3502 			if (random)
3503 				req->r_resend_mds = mds;
3504 		}
3505 		list_add(&req->r_wait, &session->s_waiting);
3506 		goto out_session;
3507 	}
3508 
3509 	/* send request */
3510 	req->r_resend_mds = -1;   /* forget any previous mds hint */
3511 
3512 	if (req->r_request_started == 0)   /* note request start time */
3513 		req->r_request_started = jiffies;
3514 
3515 	/*
3516 	 * For async create we will choose the auth MDS of the frag in the
3517 	 * parent directory to send the request, and usually this works
3518 	 * fine, but if the directory is migrated to another MDS before it
3519 	 * can handle the request, the request will be forwarded.
3520 	 *
3521 	 * And then the auth cap will be changed.
3522 	 */
3523 	if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) && req->r_num_fwd) {
3524 		struct ceph_dentry_info *di = ceph_dentry(req->r_dentry);
3525 		struct ceph_inode_info *ci;
3526 		struct ceph_cap *cap;
3527 
3528 		/*
3529 		 * The request may be handled very quickly while the new inode
3530 		 * hasn't been linked to the dentry yet. We need to wait
3531 		 * for ceph_finish_async_create(), which shouldn't get
3532 		 * stuck for long or fail in theory, to finish before
3533 		 * forwarding the request.
3534 		 */
3535 		if (!d_inode(req->r_dentry)) {
3536 			err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT,
3537 					  TASK_KILLABLE);
3538 			if (err) {
3539 				mutex_lock(&req->r_fill_mutex);
3540 				set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3541 				mutex_unlock(&req->r_fill_mutex);
3542 				goto out_session;
3543 			}
3544 		}
3545 
3546 		ci = ceph_inode(d_inode(req->r_dentry));
3547 
3548 		spin_lock(&ci->i_ceph_lock);
3549 		cap = ci->i_auth_cap;
3550 		if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE && mds != cap->mds) {
3551 			doutc(cl, "session changed for auth cap %d -> %d\n",
3552 			      cap->session->s_mds, session->s_mds);
3553 
3554 			/* Remove the auth cap from old session */
3555 			spin_lock(&cap->session->s_cap_lock);
3556 			cap->session->s_nr_caps--;
3557 			list_del_init(&cap->session_caps);
3558 			spin_unlock(&cap->session->s_cap_lock);
3559 
3560 			/* Add the auth cap to the new session */
3561 			cap->mds = mds;
3562 			cap->session = session;
3563 			spin_lock(&session->s_cap_lock);
3564 			session->s_nr_caps++;
3565 			list_add_tail(&cap->session_caps, &session->s_caps);
3566 			spin_unlock(&session->s_cap_lock);
3567 
3568 			change_auth_cap_ses(ci, session);
3569 		}
3570 		spin_unlock(&ci->i_ceph_lock);
3571 	}
3572 
3573 	err = __send_request(session, req, false);
3574 
3575 out_session:
3576 	ceph_put_mds_session(session);
3577 finish:
3578 	if (err) {
3579 		doutc(cl, "early error %d\n", err);
3580 		req->r_err = err;
3581 		complete_request(mdsc, req);
3582 		__unregister_request(mdsc, req);
3583 	}
3584 	return;
3585 }
3586 
3587 /*
3588  * called under mdsc->mutex
3589  */
3590 static void __wake_requests(struct ceph_mds_client *mdsc,
3591 			    struct list_head *head)
3592 {
3593 	struct ceph_client *cl = mdsc->fsc->client;
3594 	struct ceph_mds_request *req;
3595 	LIST_HEAD(tmp_list);
3596 
3597 	list_splice_init(head, &tmp_list);
3598 
3599 	while (!list_empty(&tmp_list)) {
3600 		req = list_entry(tmp_list.next,
3601 				 struct ceph_mds_request, r_wait);
3602 		list_del_init(&req->r_wait);
3603 		doutc(cl, " wake request %p tid %llu\n", req,
3604 		      req->r_tid);
3605 		__do_request(mdsc, req);
3606 	}
3607 }
3608 
3609 /*
3610  * Wake up threads with requests pending for @mds, so that they can
3611  * resubmit their requests to a possibly different mds.
3612  */
3613 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
3614 {
3615 	struct ceph_client *cl = mdsc->fsc->client;
3616 	struct ceph_mds_request *req;
3617 	struct rb_node *p = rb_first(&mdsc->request_tree);
3618 
3619 	doutc(cl, "kick_requests mds%d\n", mds);
3620 	while (p) {
3621 		req = rb_entry(p, struct ceph_mds_request, r_node);
3622 		p = rb_next(p);
3623 		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3624 			continue;
3625 		if (req->r_attempts > 0)
3626 			continue; /* only new requests */
3627 		if (req->r_session &&
3628 		    req->r_session->s_mds == mds) {
3629 			doutc(cl, " kicking tid %llu\n", req->r_tid);
3630 			list_del_init(&req->r_wait);
3631 			__do_request(mdsc, req);
3632 		}
3633 	}
3634 }
3635 
3636 int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
3637 			      struct ceph_mds_request *req)
3638 {
3639 	struct ceph_client *cl = mdsc->fsc->client;
3640 	int err = 0;
3641 
3642 	/* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
3643 	if (req->r_inode)
3644 		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
3645 	if (req->r_parent) {
3646 		struct ceph_inode_info *ci = ceph_inode(req->r_parent);
3647 		int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
3648 			    CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD;
3649 		spin_lock(&ci->i_ceph_lock);
3650 		ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
3651 		__ceph_touch_fmode(ci, mdsc, fmode);
3652 		spin_unlock(&ci->i_ceph_lock);
3653 	}
3654 	if (req->r_old_dentry_dir)
3655 		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
3656 				  CEPH_CAP_PIN);
3657 
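	/*
	 * r_inode may have been created asynchronously; wait for the
	 * async create reply before sending a request against it.
	 */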
3658 	if (req->r_inode) {
3659 		err = ceph_wait_on_async_create(req->r_inode);
3660 		if (err) {
3661 			doutc(cl, "wait for async create returned: %d\n", err);
3662 			return err;
3663 		}
3664 	}
3665 
3666 	if (!err && req->r_old_inode) {
3667 		err = ceph_wait_on_async_create(req->r_old_inode);
3668 		if (err) {
3669 			doutc(cl, "wait for async create returned: %d\n", err);
3670 			return err;
3671 		}
3672 	}
3673 
3674 	doutc(cl, "submit_request on %p for inode %p\n", req, dir);
3675 	mutex_lock(&mdsc->mutex);
3676 	__register_request(mdsc, req, dir);
3677 	__do_request(mdsc, req);
3678 	err = req->r_err;
3679 	mutex_unlock(&mdsc->mutex);
3680 	return err;
3681 }
3682 
3683 int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
3684 			   struct ceph_mds_request *req,
3685 			   ceph_mds_request_wait_callback_t wait_func)
3686 {
3687 	struct ceph_client *cl = mdsc->fsc->client;
3688 	int err;
3689 
3690 	/* wait */
3691 	doutc(cl, "do_request waiting\n");
3692 	if (wait_func) {
3693 		err = wait_func(mdsc, req);
3694 	} else {
3695 		long timeleft = wait_for_completion_killable_timeout(
3696 					&req->r_completion,
3697 					ceph_timeout_jiffies(req->r_timeout));
3698 		if (timeleft > 0)
3699 			err = 0;
3700 		else if (!timeleft)
3701 			err = -ETIMEDOUT;  /* timed out */
3702 		else
3703 			err = timeleft;  /* killed */
3704 	}
3705 	doutc(cl, "do_request waited, got %d\n", err);
3706 	mutex_lock(&mdsc->mutex);
3707 
3708 	/* only abort if we didn't race with a real reply */
3709 	if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
3710 		err = le32_to_cpu(req->r_reply_info.head->result);
3711 	} else if (err < 0) {
3712 		doutc(cl, "aborted request %lld with %d\n", req->r_tid, err);
3713 
3714 		/*
3715 		 * ensure we aren't running concurrently with
3716 		 * ceph_fill_trace or ceph_readdir_prepopulate, which
3717 		 * rely on locks (dir mutex) held by our caller.
3718 		 */
3719 		mutex_lock(&req->r_fill_mutex);
3720 		req->r_err = err;
3721 		set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3722 		mutex_unlock(&req->r_fill_mutex);
3723 
3724 		if (req->r_parent &&
3725 		    (req->r_op & CEPH_MDS_OP_WRITE))
3726 			ceph_invalidate_dir_request(req);
3727 	} else {
3728 		err = req->r_err;
3729 	}
3730 
3731 	mutex_unlock(&mdsc->mutex);
3732 	return err;
3733 }
3734 
3735 /*
3736  * Synchronously perform an MDS request.  Takes care of all of the
3737  * session setup, forwarding, and retry details.
3738  */
3739 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
3740 			 struct inode *dir,
3741 			 struct ceph_mds_request *req)
3742 {
3743 	struct ceph_client *cl = mdsc->fsc->client;
3744 	int err;
3745 
3746 	doutc(cl, "do_request on %p\n", req);
3747 
3748 	/* issue */
3749 	err = ceph_mdsc_submit_request(mdsc, dir, req);
3750 	if (!err)
3751 		err = ceph_mdsc_wait_request(mdsc, req, NULL);
3752 	doutc(cl, "do_request %p done, result %d\n", req, err);
3753 	return err;
3754 }
3755 
3756 /*
3757  * Invalidate dir's completeness, dentry lease state on an aborted MDS
3758  * namespace request.
3759  */
3760 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
3761 {
3762 	struct inode *dir = req->r_parent;
3763 	struct inode *old_dir = req->r_old_dentry_dir;
3764 	struct ceph_client *cl = req->r_mdsc->fsc->client;
3765 
3766 	doutc(cl, "invalidate_dir_request %p %p (complete, lease(s))\n",
3767 	      dir, old_dir);
3768 
3769 	ceph_dir_clear_complete(dir);
3770 	if (old_dir)
3771 		ceph_dir_clear_complete(old_dir);
3772 	if (req->r_dentry)
3773 		ceph_invalidate_dentry_lease(req->r_dentry);
3774 	if (req->r_old_dentry)
3775 		ceph_invalidate_dentry_lease(req->r_old_dentry);
3776 }
3777 
3778 /*
3779  * Handle mds reply.
3780  *
3781  * We take the session mutex and parse and process the reply immediately.
3782  * This preserves the logical ordering of replies, capabilities, etc., sent
3783  * by the MDS as they are applied to our local cache.
3784  */
3785 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
3786 {
3787 	struct ceph_mds_client *mdsc = session->s_mdsc;
3788 	struct ceph_client *cl = mdsc->fsc->client;
3789 	struct ceph_mds_request *req;
3790 	struct ceph_mds_reply_head *head = msg->front.iov_base;
3791 	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
3792 	struct ceph_snap_realm *realm;
3793 	u64 tid;
3794 	int err, result;
3795 	int mds = session->s_mds;
3796 	bool close_sessions = false;
3797 
3798 	if (msg->front.iov_len < sizeof(*head)) {
3799 		pr_err_client(cl, "got corrupt (short) reply\n");
3800 		ceph_msg_dump(msg);
3801 		return;
3802 	}
3803 
3804 	/* get request, session */
3805 	tid = le64_to_cpu(msg->hdr.tid);
3806 	mutex_lock(&mdsc->mutex);
3807 	req = lookup_get_request(mdsc, tid);
3808 	if (!req) {
3809 		doutc(cl, "on unknown tid %llu\n", tid);
3810 		mutex_unlock(&mdsc->mutex);
3811 		return;
3812 	}
3813 	doutc(cl, "handle_reply %p\n", req);
3814 
3815 	/* correct session? */
3816 	if (req->r_session != session) {
3817 		pr_err_client(cl, "got %llu on session mds%d not mds%d\n",
3818 			      tid, session->s_mds,
3819 			      req->r_session ? req->r_session->s_mds : -1);
3820 		mutex_unlock(&mdsc->mutex);
3821 		goto out;
3822 	}
3823 
3824 	/* dup? */
3825 	if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
3826 	    (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
3827 		pr_warn_client(cl, "got a dup %s reply on %llu from mds%d\n",
3828 			       head->safe ? "safe" : "unsafe", tid, mds);
3829 		mutex_unlock(&mdsc->mutex);
3830 		goto out;
3831 	}
3832 	if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
3833 		pr_warn_client(cl, "got unsafe after safe on %llu from mds%d\n",
3834 			       tid, mds);
3835 		mutex_unlock(&mdsc->mutex);
3836 		goto out;
3837 	}
3838 
3839 	result = le32_to_cpu(head->result);
3840 
3841 	if (head->safe) {
3842 		set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
3843 		__unregister_request(mdsc, req);
3844 
3845 		/* last request during umount? */
3846 		if (mdsc->stopping && !__get_oldest_req(mdsc))
3847 			complete_all(&mdsc->safe_umount_waiters);
3848 
3849 		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3850 			/*
3851 			 * We already handled the unsafe response, now do the
3852 			 * cleanup.  No need to examine the response; the MDS
3853 			 * doesn't include any result info in the safe
3854 			 * response.  And even if it did, there is nothing
3855 			 * useful we could do with a revised return value.
3856 			 */
3857 			doutc(cl, "got safe reply %llu, mds%d\n", tid, mds);
3858 
3859 			mutex_unlock(&mdsc->mutex);
3860 			goto out;
3861 		}
3862 	} else {
3863 		set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
3864 		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
3865 	}
3866 
3867 	doutc(cl, "tid %lld result %d\n", tid, result);
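	/*
	 * Sessions advertising CEPHFS_FEATURE_REPLY_ENCODING always use
	 * the new reply encoding, so decode as if every feature bit were
	 * present; otherwise fall back to the connection's negotiated
	 * feature bits.
	 */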
3868 	if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
3869 		err = parse_reply_info(session, msg, req, (u64)-1);
3870 	else
3871 		err = parse_reply_info(session, msg, req,
3872 				       session->s_con.peer_features);
3873 	mutex_unlock(&mdsc->mutex);
3874 
3875 	/* Must find target inode outside of mutexes to avoid deadlocks */
3876 	rinfo = &req->r_reply_info;
3877 	if ((err >= 0) && rinfo->head->is_target) {
3878 		struct inode *in = xchg(&req->r_new_inode, NULL);
3879 		struct ceph_vino tvino = {
3880 			.ino  = le64_to_cpu(rinfo->targeti.in->ino),
3881 			.snap = le64_to_cpu(rinfo->targeti.in->snapid)
3882 		};
3883 
3884 		/*
3885 		 * If we ended up opening an existing inode, discard
3886 		 * r_new_inode
3887 		 */
3888 		if (req->r_op == CEPH_MDS_OP_CREATE &&
3889 		    !req->r_reply_info.has_create_ino) {
3890 			/* This should never happen on an async create */
3891 			WARN_ON_ONCE(req->r_deleg_ino);
3892 			iput(in);
3893 			in = NULL;
3894 		}
3895 
3896 		in = ceph_get_inode(mdsc->fsc->sb, tvino, in);
3897 		if (IS_ERR(in)) {
3898 			err = PTR_ERR(in);
3899 			mutex_lock(&session->s_mutex);
3900 			goto out_err;
3901 		}
3902 		req->r_target_inode = in;
3903 	}
3904 
3905 	mutex_lock(&session->s_mutex);
3906 	if (err < 0) {
3907 		pr_err_client(cl, "got corrupt reply mds%d(tid:%lld)\n",
3908 			      mds, tid);
3909 		ceph_msg_dump(msg);
3910 		goto out_err;
3911 	}
3912 
3913 	/* snap trace */
3914 	realm = NULL;
3915 	if (rinfo->snapblob_len) {
3916 		down_write(&mdsc->snap_rwsem);
3917 		err = ceph_update_snap_trace(mdsc, rinfo->snapblob,
3918 				rinfo->snapblob + rinfo->snapblob_len,
3919 				le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
3920 				&realm);
3921 		if (err) {
3922 			up_write(&mdsc->snap_rwsem);
3923 			close_sessions = true;
3924 			if (err == -EIO)
3925 				ceph_msg_dump(msg);
3926 			goto out_err;
3927 		}
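		/*
		 * Keep snap_rwsem held for read across ceph_fill_trace()
		 * below; the write lock was only needed to update the snap
		 * trace.
		 */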
3928 		downgrade_write(&mdsc->snap_rwsem);
3929 	} else {
3930 		down_read(&mdsc->snap_rwsem);
3931 	}
3932 
3933 	/* insert trace into our cache */
3934 	mutex_lock(&req->r_fill_mutex);
3935 	current->journal_info = req;
3936 	err = ceph_fill_trace(mdsc->fsc->sb, req);
3937 	if (err == 0) {
3938 		if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
3939 				    req->r_op == CEPH_MDS_OP_LSSNAP))
3940 			err = ceph_readdir_prepopulate(req, req->r_session);
3941 	}
3942 	current->journal_info = NULL;
3943 	mutex_unlock(&req->r_fill_mutex);
3944 
3945 	up_read(&mdsc->snap_rwsem);
3946 	if (realm)
3947 		ceph_put_snap_realm(mdsc, realm);
3948 
3949 	if (err == 0) {
3950 		if (req->r_target_inode &&
3951 		    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3952 			struct ceph_inode_info *ci =
3953 				ceph_inode(req->r_target_inode);
3954 			spin_lock(&ci->i_unsafe_lock);
3955 			list_add_tail(&req->r_unsafe_target_item,
3956 				      &ci->i_unsafe_iops);
3957 			spin_unlock(&ci->i_unsafe_lock);
3958 		}
3959 
3960 		ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3961 	}
3962 out_err:
3963 	mutex_lock(&mdsc->mutex);
3964 	if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3965 		if (err) {
3966 			req->r_err = err;
3967 		} else {
3968 			req->r_reply =  ceph_msg_get(msg);
3969 			set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
3970 		}
3971 	} else {
3972 		doutc(cl, "reply arrived after request %lld was aborted\n", tid);
3973 	}
3974 	mutex_unlock(&mdsc->mutex);
3975 
3976 	mutex_unlock(&session->s_mutex);
3977 
3978 	/* kick calling process */
3979 	complete_request(mdsc, req);
3980 
3981 	ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency,
3982 				     req->r_end_latency, err);
3983 out:
3984 	ceph_mdsc_put_request(req);
3985 
3986 	/* Defer closing the sessions until after the s_mutex has been released */
3987 	if (close_sessions)
3988 		ceph_mdsc_close_sessions(mdsc);
3989 	return;
3990 }
3991 
3992 
3993 
3994 /*
3995  * handle mds notification that our request has been forwarded.
3996  */
3997 static void handle_forward(struct ceph_mds_client *mdsc,
3998 			   struct ceph_mds_session *session,
3999 			   struct ceph_msg *msg)
4000 {
4001 	struct ceph_client *cl = mdsc->fsc->client;
4002 	struct ceph_mds_request *req;
4003 	u64 tid = le64_to_cpu(msg->hdr.tid);
4004 	u32 next_mds;
4005 	u32 fwd_seq;
4006 	int err = -EINVAL;
4007 	void *p = msg->front.iov_base;
4008 	void *end = p + msg->front.iov_len;
4009 	bool aborted = false;
4010 
4011 	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
4012 	next_mds = ceph_decode_32(&p);
4013 	fwd_seq = ceph_decode_32(&p);
4014 
4015 	mutex_lock(&mdsc->mutex);
4016 	req = lookup_get_request(mdsc, tid);
4017 	if (!req) {
4018 		mutex_unlock(&mdsc->mutex);
4019 		doutc(cl, "forward tid %llu to mds%d - req dne\n", tid, next_mds);
4020 		return;  /* dup reply? */
4021 	}
4022 
4023 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
4024 		doutc(cl, "forward tid %llu aborted, unregistering\n", tid);
4025 		__unregister_request(mdsc, req);
4026 	} else if (fwd_seq <= req->r_num_fwd || (uint32_t)fwd_seq >= U32_MAX) {
4027 		/*
4028 		 * Avoid infinite retrying after overflow.
4029 		 *
4030 		 * The MDS increases the fwd count with every forward.  On
4031 		 * the client side, if the received num_fwd is not larger
4032 		 * than the one saved in the request, the MDS must be an old
4033 		 * version whose 8-bit counter has overflowed.
4034 		 */
4035 		mutex_lock(&req->r_fill_mutex);
4036 		req->r_err = -EMULTIHOP;
4037 		set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
4038 		mutex_unlock(&req->r_fill_mutex);
4039 		aborted = true;
4040 		pr_warn_ratelimited_client(cl, "forward tid %llu seq overflow\n",
4041 					   tid);
4042 	} else {
4043 		/* resend. forward race not possible; mds would drop */
4044 		doutc(cl, "forward tid %llu to mds%d (we resend)\n", tid, next_mds);
4045 		BUG_ON(req->r_err);
4046 		BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
4047 		req->r_attempts = 0;
4048 		req->r_num_fwd = fwd_seq;
4049 		req->r_resend_mds = next_mds;
4050 		put_request_session(req);
4051 		__do_request(mdsc, req);
4052 	}
4053 	mutex_unlock(&mdsc->mutex);
4054 
4055 	/* kick calling process */
4056 	if (aborted)
4057 		complete_request(mdsc, req);
4058 	ceph_mdsc_put_request(req);
4059 	return;
4060 
4061 bad:
4062 	pr_err_client(cl, "decode error err=%d\n", err);
4063 	ceph_msg_dump(msg);
4064 }
4065 
4066 static int __decode_session_metadata(void **p, void *end,
4067 				     bool *blocklisted)
4068 {
4069 	/* map<string,string> */
4070 	u32 n;
4071 	bool err_str;
4072 	ceph_decode_32_safe(p, end, n, bad);
4073 	while (n-- > 0) {
4074 		u32 len;
4075 		ceph_decode_32_safe(p, end, len, bad);
4076 		ceph_decode_need(p, end, len, bad);
4077 		err_str = !strncmp(*p, "error_string", len);
4078 		*p += len;
4079 		ceph_decode_32_safe(p, end, len, bad);
4080 		ceph_decode_need(p, end, len, bad);
4081 		/*
4082 		 * Match "blocklisted (blacklisted)" from newer MDSes,
4083 		 * or "blacklisted" from older MDSes.
4084 		 */
4085 		if (err_str && strnstr(*p, "blacklisted", len))
4086 			*blocklisted = true;
4087 		*p += len;
4088 	}
4089 	return 0;
4090 bad:
4091 	return -1;
4092 }
4093 
4094 /*
4095  * handle a mds session control message
4096  */
4097 static void handle_session(struct ceph_mds_session *session,
4098 			   struct ceph_msg *msg)
4099 {
4100 	struct ceph_mds_client *mdsc = session->s_mdsc;
4101 	struct ceph_client *cl = mdsc->fsc->client;
4102 	int mds = session->s_mds;
4103 	int msg_version = le16_to_cpu(msg->hdr.version);
4104 	void *p = msg->front.iov_base;
4105 	void *end = p + msg->front.iov_len;
4106 	struct ceph_mds_session_head *h;
4107 	struct ceph_mds_cap_auth *cap_auths = NULL;
4108 	u32 op, cap_auths_num = 0;
4109 	u64 seq, features = 0;
4110 	int wake = 0;
4111 	bool blocklisted = false;
4112 	u32 i;
4113 
4114 
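	/*
	 * Message layout by version: v3+ carries session metadata and
	 * feature bits, v5+ adds a metric spec and flags, and v6+ adds
	 * the cap auth entries decoded below.
	 */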
4115 	/* decode */
4116 	ceph_decode_need(&p, end, sizeof(*h), bad);
4117 	h = p;
4118 	p += sizeof(*h);
4119 
4120 	op = le32_to_cpu(h->op);
4121 	seq = le64_to_cpu(h->seq);
4122 
4123 	if (msg_version >= 3) {
4124 		u32 len;
4125 		/* for versions >= 2 and < 5, decode the metadata; skip it
4126 		 * otherwise, since blocklisting is signalled via flags instead.
4127 		 */
4128 		if (msg_version >= 5)
4129 			ceph_decode_skip_map(&p, end, string, string, bad);
4130 		else if (__decode_session_metadata(&p, end, &blocklisted) < 0)
4131 			goto bad;
4132 
4133 		/* version >= 3, feature bits */
4134 		ceph_decode_32_safe(&p, end, len, bad);
4135 		if (len) {
4136 			ceph_decode_64_safe(&p, end, features, bad);
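			/* only the first 64 feature bits are used; skip any
			 * remainder */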
4137 			p += len - sizeof(features);
4138 		}
4139 	}
4140 
4141 	if (msg_version >= 5) {
4142 		u32 flags, len;
4143 
4144 		/* version >= 4 */
4145 		ceph_decode_skip_16(&p, end, bad); /* struct_v, struct_cv */
4146 		ceph_decode_32_safe(&p, end, len, bad); /* len */
4147 		ceph_decode_skip_n(&p, end, len, bad); /* metric_spec */
4148 
4149 		/* version >= 5, flags   */
4150 		ceph_decode_32_safe(&p, end, flags, bad);
4151 		if (flags & CEPH_SESSION_BLOCKLISTED) {
4152 			pr_warn_client(cl, "mds%d session blocklisted\n",
4153 				       session->s_mds);
4154 			blocklisted = true;
4155 		}
4156 	}
4157 
4158 	if (msg_version >= 6) {
4159 		ceph_decode_32_safe(&p, end, cap_auths_num, bad);
4160 		doutc(cl, "cap_auths_num %d\n", cap_auths_num);
4161 
4162 		if (cap_auths_num && op != CEPH_SESSION_OPEN) {
4163 			WARN_ON_ONCE(op != CEPH_SESSION_OPEN);
4164 			goto skip_cap_auths;
4165 		}
4166 
4167 		cap_auths = kcalloc(cap_auths_num,
4168 				    sizeof(struct ceph_mds_cap_auth),
4169 				    GFP_KERNEL);
4170 		if (!cap_auths) {
4171 			pr_err_client(cl, "No memory for cap_auths\n");
4172 			return;
4173 		}
4174 
4175 		for (i = 0; i < cap_auths_num; i++) {
4176 			u32 _len, j;
4177 
4178 			/* struct_v, struct_compat, and struct_len in MDSCapAuth */
4179 			ceph_decode_skip_n(&p, end, 2 + sizeof(u32), bad);
4180 
4181 			/* struct_v, struct_compat, and struct_len in MDSCapMatch */
4182 			ceph_decode_skip_n(&p, end, 2 + sizeof(u32), bad);
4183 			ceph_decode_64_safe(&p, end, cap_auths[i].match.uid, bad);
4184 			ceph_decode_32_safe(&p, end, _len, bad);
4185 			if (_len) {
4186 				cap_auths[i].match.gids = kcalloc(_len, sizeof(u32),
4187 								  GFP_KERNEL);
4188 				if (!cap_auths[i].match.gids) {
4189 					pr_err_client(cl, "No memory for gids\n");
4190 					goto fail;
4191 				}
4192 
4193 				cap_auths[i].match.num_gids = _len;
4194 				for (j = 0; j < _len; j++)
4195 					ceph_decode_32_safe(&p, end,
4196 							    cap_auths[i].match.gids[j],
4197 							    bad);
4198 			}
4199 
4200 			ceph_decode_32_safe(&p, end, _len, bad);
4201 			if (_len) {
4202 				cap_auths[i].match.path = kcalloc(_len + 1, sizeof(char),
4203 								  GFP_KERNEL);
4204 				if (!cap_auths[i].match.path) {
4205 					pr_err_client(cl, "No memory for path\n");
4206 					goto fail;
4207 				}
4208 				ceph_decode_copy(&p, cap_auths[i].match.path, _len);
4209 
4210 				/* strip any trailing '/' characters */
4211 				while (_len && cap_auths[i].match.path[_len - 1] == '/') {
4212 					cap_auths[i].match.path[_len - 1] = '\0';
4213 					_len -= 1;
4214 				}
4215 			}
4216 
4217 			ceph_decode_32_safe(&p, end, _len, bad);
4218 			if (_len) {
4219 				cap_auths[i].match.fs_name = kcalloc(_len + 1, sizeof(char),
4220 								     GFP_KERNEL);
4221 				if (!cap_auths[i].match.fs_name) {
4222 					pr_err_client(cl, "No memory for fs_name\n");
4223 					goto fail;
4224 				}
4225 				ceph_decode_copy(&p, cap_auths[i].match.fs_name, _len);
4226 			}
4227 
4228 			ceph_decode_8_safe(&p, end, cap_auths[i].match.root_squash, bad);
4229 			ceph_decode_8_safe(&p, end, cap_auths[i].readable, bad);
4230 			ceph_decode_8_safe(&p, end, cap_auths[i].writeable, bad);
4231 			doutc(cl, "uid %lld, num_gids %u, path %s, fs_name %s, root_squash %d, readable %d, writeable %d\n",
4232 			      cap_auths[i].match.uid, cap_auths[i].match.num_gids,
4233 			      cap_auths[i].match.path, cap_auths[i].match.fs_name,
4234 			      cap_auths[i].match.root_squash,
4235 			      cap_auths[i].readable, cap_auths[i].writeable);
4236 		}
4237 	}
4238 
4239 skip_cap_auths:
4240 	mutex_lock(&mdsc->mutex);
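	/*
	 * On session open, replace any previously stored cap auth
	 * entries with the freshly decoded set.
	 */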
4241 	if (op == CEPH_SESSION_OPEN) {
4242 		if (mdsc->s_cap_auths) {
4243 			for (i = 0; i < mdsc->s_cap_auths_num; i++) {
4244 				kfree(mdsc->s_cap_auths[i].match.gids);
4245 				kfree(mdsc->s_cap_auths[i].match.path);
4246 				kfree(mdsc->s_cap_auths[i].match.fs_name);
4247 			}
4248 			kfree(mdsc->s_cap_auths);
4249 		}
4250 		mdsc->s_cap_auths_num = cap_auths_num;
4251 		mdsc->s_cap_auths = cap_auths;
4252 	}
4253 	if (op == CEPH_SESSION_CLOSE) {
4254 		ceph_get_mds_session(session);
4255 		__unregister_session(mdsc, session);
4256 	}
4257 	/* FIXME: this ttl calculation is generous */
4258 	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
4259 	mutex_unlock(&mdsc->mutex);
4260 
4261 	mutex_lock(&session->s_mutex);
4262 
4263 	doutc(cl, "mds%d %s %p state %s seq %llu\n", mds,
4264 	      ceph_session_op_name(op), session,
4265 	      ceph_session_state_name(session->s_state), seq);
4266 
4267 	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
4268 		session->s_state = CEPH_MDS_SESSION_OPEN;
4269 		pr_info_client(cl, "mds%d came back\n", session->s_mds);
4270 	}
4271 
4272 	switch (op) {
4273 	case CEPH_SESSION_OPEN:
4274 		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
4275 			pr_info_client(cl, "mds%d reconnect success\n",
4276 				       session->s_mds);
4277 
4278 		session->s_features = features;
4279 		if (session->s_state == CEPH_MDS_SESSION_OPEN) {
4280 			pr_notice_client(cl, "mds%d is already opened\n",
4281 					 session->s_mds);
4282 		} else {
4283 			session->s_state = CEPH_MDS_SESSION_OPEN;
4284 			renewed_caps(mdsc, session, 0);
4285 			if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT,
4286 				     &session->s_features))
4287 				metric_schedule_delayed(&mdsc->metric);
4288 		}
4289 
4290 		/*
4291 		 * The connection may have been broken and the session on
4292 		 * the client side reinitialized, in which case the seq
4293 		 * needs to be updated anyway.
4294 		 */
4295 		if (!session->s_seq && seq)
4296 			session->s_seq = seq;
4297 
4298 		wake = 1;
4299 		if (mdsc->stopping)
4300 			__close_session(mdsc, session);
4301 		break;
4302 
4303 	case CEPH_SESSION_RENEWCAPS:
4304 		if (session->s_renew_seq == seq)
4305 			renewed_caps(mdsc, session, 1);
4306 		break;
4307 
4308 	case CEPH_SESSION_CLOSE:
4309 		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
4310 			pr_info_client(cl, "mds%d reconnect denied\n",
4311 				       session->s_mds);
4312 		session->s_state = CEPH_MDS_SESSION_CLOSED;
4313 		cleanup_session_requests(mdsc, session);
4314 		remove_session_caps(session);
4315 		wake = 2; /* for good measure */
4316 		wake_up_all(&mdsc->session_close_wq);
4317 		break;
4318 
4319 	case CEPH_SESSION_STALE:
4320 		pr_info_client(cl, "mds%d caps went stale, renewing\n",
4321 			       session->s_mds);
4322 		atomic_inc(&session->s_cap_gen);
4323 		session->s_cap_ttl = jiffies - 1;
4324 		send_renew_caps(mdsc, session);
4325 		break;
4326 
4327 	case CEPH_SESSION_RECALL_STATE:
4328 		ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
4329 		break;
4330 
4331 	case CEPH_SESSION_FLUSHMSG:
4332 		/* flush cap releases */
4333 		spin_lock(&session->s_cap_lock);
4334 		if (session->s_num_cap_releases)
4335 			ceph_flush_session_cap_releases(mdsc, session);
4336 		spin_unlock(&session->s_cap_lock);
4337 
4338 		send_flushmsg_ack(mdsc, session, seq);
4339 		break;
4340 
4341 	case CEPH_SESSION_FORCE_RO:
4342 		doutc(cl, "force_session_readonly %p\n", session);
4343 		spin_lock(&session->s_cap_lock);
4344 		session->s_readonly = true;
4345 		spin_unlock(&session->s_cap_lock);
4346 		wake_up_session_caps(session, FORCE_RO);
4347 		break;
4348 
4349 	case CEPH_SESSION_REJECT:
4350 		WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
4351 		pr_info_client(cl, "mds%d rejected session\n",
4352 			       session->s_mds);
4353 		session->s_state = CEPH_MDS_SESSION_REJECTED;
4354 		cleanup_session_requests(mdsc, session);
4355 		remove_session_caps(session);
4356 		if (blocklisted)
4357 			mdsc->fsc->blocklisted = true;
4358 		wake = 2; /* for good measure */
4359 		break;
4360 
4361 	default:
4362 		pr_err_client(cl, "bad op %d mds%d\n", op, mds);
4363 		WARN_ON(1);
4364 	}
4365 
4366 	mutex_unlock(&session->s_mutex);
4367 	if (wake) {
4368 		mutex_lock(&mdsc->mutex);
4369 		__wake_requests(mdsc, &session->s_waiting);
4370 		if (wake == 2)
4371 			kick_requests(mdsc, mds);
4372 		mutex_unlock(&mdsc->mutex);
4373 	}
4374 	if (op == CEPH_SESSION_CLOSE)
4375 		ceph_put_mds_session(session);
4376 	return;
4377 
4378 bad:
4379 	pr_err_client(cl, "corrupt message mds%d len %d\n", mds,
4380 		      (int)msg->front.iov_len);
4381 	ceph_msg_dump(msg);
4382 fail:
4383 	for (i = 0; i < cap_auths_num; i++) {
4384 		kfree(cap_auths[i].match.gids);
4385 		kfree(cap_auths[i].match.path);
4386 		kfree(cap_auths[i].match.fs_name);
4387 	}
4388 	kfree(cap_auths);
4389 	return;
4390 }
4391 
4392 void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
4393 {
4394 	struct ceph_client *cl = req->r_mdsc->fsc->client;
4395 	int dcaps;
4396 
4397 	dcaps = xchg(&req->r_dir_caps, 0);
4398 	if (dcaps) {
4399 		doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
4400 		ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
4401 	}
4402 }
4403 
4404 void ceph_mdsc_release_dir_caps_async(struct ceph_mds_request *req)
4405 {
4406 	struct ceph_client *cl = req->r_mdsc->fsc->client;
4407 	int dcaps;
4408 
4409 	dcaps = xchg(&req->r_dir_caps, 0);
4410 	if (dcaps) {
4411 		doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
4412 		ceph_put_cap_refs_async(ceph_inode(req->r_parent), dcaps);
4413 	}
4414 }
4415 
4416 /*
4417  * called under session->s_mutex.
4418  */
4419 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
4420 				   struct ceph_mds_session *session)
4421 {
4422 	struct ceph_mds_request *req, *nreq;
4423 	struct rb_node *p;
4424 
4425 	doutc(mdsc->fsc->client, "mds%d\n", session->s_mds);
4426 
4427 	mutex_lock(&mdsc->mutex);
4428 	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
4429 		__send_request(session, req, true);
4430 
4431 	/*
4432 	 * Also re-send old requests when the MDS enters the reconnect stage,
4433 	 * so that it can process completed requests in its clientreplay stage.
4434 	 */
4435 	p = rb_first(&mdsc->request_tree);
4436 	while (p) {
4437 		req = rb_entry(p, struct ceph_mds_request, r_node);
4438 		p = rb_next(p);
4439 		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
4440 			continue;
4441 		if (req->r_attempts == 0)
4442 			continue; /* only old requests */
4443 		if (!req->r_session)
4444 			continue;
4445 		if (req->r_session->s_mds != session->s_mds)
4446 			continue;
4447 
4448 		ceph_mdsc_release_dir_caps_async(req);
4449 
4450 		__send_request(session, req, true);
4451 	}
4452 	mutex_unlock(&mdsc->mutex);
4453 }
4454 
4455 static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
4456 {
4457 	struct ceph_msg *reply;
4458 	struct ceph_pagelist *_pagelist;
4459 	struct page *page;
4460 	__le32 *addr;
4461 	int err = -ENOMEM;
4462 
4463 	if (!recon_state->allow_multi)
4464 		return -ENOSPC;
4465 
4466 	/* can't handle a message with both caps and realms; exactly one must be non-zero */
4467 	BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);
4468 
4469 	/* pre-allocate new pagelist */
4470 	_pagelist = ceph_pagelist_alloc(GFP_NOFS);
4471 	if (!_pagelist)
4472 		return -ENOMEM;
4473 
4474 	reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
4475 	if (!reply)
4476 		goto fail_msg;
4477 
4478 	/* placeholder for nr_caps */
4479 	err = ceph_pagelist_encode_32(_pagelist, 0);
4480 	if (err < 0)
4481 		goto fail;
4482 
4483 	if (recon_state->nr_caps) {
4484 		/* currently encoding caps */
4485 		err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
4486 		if (err)
4487 			goto fail;
4488 	} else {
4489 		/* placeholder for nr_realms (currently encoding realms) */
4490 		err = ceph_pagelist_encode_32(_pagelist, 0);
4491 		if (err < 0)
4492 			goto fail;
4493 	}
4494 
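	/* trailing flag byte: 1 means more reconnect messages follow
	 * (the final message encodes 0 instead) */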
4495 	err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
4496 	if (err)
4497 		goto fail;
4498 
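	/*
	 * The 32-bit count placeholders encoded above sit at the start of
	 * the pagelist's first page; patch in the final values now that
	 * they are known.
	 */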
4499 	page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
4500 	addr = kmap_atomic(page);
4501 	if (recon_state->nr_caps) {
4502 		/* currently encoding caps */
4503 		*addr = cpu_to_le32(recon_state->nr_caps);
4504 	} else {
4505 		/* currently encoding realms */
4506 		*(addr + 1) = cpu_to_le32(recon_state->nr_realms);
4507 	}
4508 	kunmap_atomic(addr);
4509 
4510 	reply->hdr.version = cpu_to_le16(5);
4511 	reply->hdr.compat_version = cpu_to_le16(4);
4512 
4513 	reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
4514 	ceph_msg_data_add_pagelist(reply, recon_state->pagelist);
4515 
4516 	ceph_con_send(&recon_state->session->s_con, reply);
4517 	ceph_pagelist_release(recon_state->pagelist);
4518 
4519 	recon_state->pagelist = _pagelist;
4520 	recon_state->nr_caps = 0;
4521 	recon_state->nr_realms = 0;
4522 	recon_state->msg_version = 5;
4523 	return 0;
4524 fail:
4525 	ceph_msg_put(reply);
4526 fail_msg:
4527 	ceph_pagelist_release(_pagelist);
4528 	return err;
4529 }
4530 
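/*
 * Return a referenced dentry to use as the inode's primary link: for a
 * directory, its (sole) non-root alias; for anything else, the alias
 * flagged CEPH_DENTRY_PRIMARY_LINK.
 */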
4531 static struct dentry* d_find_primary(struct inode *inode)
4532 {
4533 	struct dentry *alias, *dn = NULL;
4534 
4535 	if (hlist_empty(&inode->i_dentry))
4536 		return NULL;
4537 
4538 	spin_lock(&inode->i_lock);
4539 	if (hlist_empty(&inode->i_dentry))
4540 		goto out_unlock;
4541 
4542 	if (S_ISDIR(inode->i_mode)) {
4543 		alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
4544 		if (!IS_ROOT(alias))
4545 			dn = dget(alias);
4546 		goto out_unlock;
4547 	}
4548 
4549 	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
4550 		spin_lock(&alias->d_lock);
4551 		if (!d_unhashed(alias) &&
4552 		    (ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) {
4553 			dn = dget_dlock(alias);
4554 		}
4555 		spin_unlock(&alias->d_lock);
4556 		if (dn)
4557 			break;
4558 	}
4559 out_unlock:
4560 	spin_unlock(&inode->i_lock);
4561 	return dn;
4562 }
4563 
4564 /*
4565  * Encode information about a cap for a reconnect with the MDS.
4566  */
4567 static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
4568 {
4569 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
4570 	struct ceph_client *cl = ceph_inode_to_client(inode);
4571 	union {
4572 		struct ceph_mds_cap_reconnect v2;
4573 		struct ceph_mds_cap_reconnect_v1 v1;
4574 	} rec;
4575 	struct ceph_inode_info *ci = ceph_inode(inode);
4576 	struct ceph_reconnect_state *recon_state = arg;
4577 	struct ceph_pagelist *pagelist = recon_state->pagelist;
4578 	struct dentry *dentry;
4579 	struct ceph_cap *cap;
4580 	char *path;
4581 	int pathlen = 0, err;
4582 	u64 pathbase;
4583 	u64 snap_follows;
4584 
4585 	dentry = d_find_primary(inode);
4586 	if (dentry) {
4587 		/* set pathbase to parent dir when msg_version >= 2 */
4588 		path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase,
4589 					    recon_state->msg_version >= 2);
4590 		dput(dentry);
4591 		if (IS_ERR(path)) {
4592 			err = PTR_ERR(path);
4593 			goto out_err;
4594 		}
4595 	} else {
4596 		path = NULL;
4597 		pathbase = 0;
4598 	}
4599 
4600 	spin_lock(&ci->i_ceph_lock);
4601 	cap = __get_cap_for_mds(ci, mds);
4602 	if (!cap) {
4603 		spin_unlock(&ci->i_ceph_lock);
4604 		err = 0;
4605 		goto out_err;
4606 	}
4607 	doutc(cl, " adding %p ino %llx.%llx cap %p %lld %s\n", inode,
4608 	      ceph_vinop(inode), cap, cap->cap_id,
4609 	      ceph_cap_string(cap->issued));
4610 
4611 	cap->seq = 0;        /* reset cap seq */
4612 	cap->issue_seq = 0;  /* and issue_seq */
4613 	cap->mseq = 0;       /* and migrate_seq */
4614 	cap->cap_gen = atomic_read(&cap->session->s_cap_gen);
4615 
4616 	/* These are lost when the session goes away */
4617 	if (S_ISDIR(inode->i_mode)) {
4618 		if (cap->issued & CEPH_CAP_DIR_CREATE) {
4619 			ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
4620 			memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
4621 		}
4622 		cap->issued &= ~CEPH_CAP_ANY_DIR_OPS;
4623 	}
4624 
4625 	if (recon_state->msg_version >= 2) {
4626 		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
4627 		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
4628 		rec.v2.issued = cpu_to_le32(cap->issued);
4629 		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
4630 		rec.v2.pathbase = cpu_to_le64(pathbase);
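		/*
		 * flock_len is used as a flag here ("encode file locks at
		 * all?"); it is overwritten with the real encoded length
		 * further below.
		 */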
4631 		rec.v2.flock_len = (__force __le32)
4632 			((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
4633 	} else {
4634 		struct timespec64 ts;
4635 
4636 		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
4637 		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
4638 		rec.v1.issued = cpu_to_le32(cap->issued);
4639 		rec.v1.size = cpu_to_le64(i_size_read(inode));
4640 		ts = inode_get_mtime(inode);
4641 		ceph_encode_timespec64(&rec.v1.mtime, &ts);
4642 		ts = inode_get_atime(inode);
4643 		ceph_encode_timespec64(&rec.v1.atime, &ts);
4644 		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
4645 		rec.v1.pathbase = cpu_to_le64(pathbase);
4646 	}
4647 
4648 	if (list_empty(&ci->i_cap_snaps)) {
4649 		snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
4650 	} else {
4651 		struct ceph_cap_snap *capsnap =
4652 			list_first_entry(&ci->i_cap_snaps,
4653 					 struct ceph_cap_snap, ci_item);
4654 		snap_follows = capsnap->follows;
4655 	}
4656 	spin_unlock(&ci->i_ceph_lock);
4657 
4658 	if (recon_state->msg_version >= 2) {
4659 		int num_fcntl_locks, num_flock_locks;
4660 		struct ceph_filelock *flocks = NULL;
4661 		size_t struct_len, total_len = sizeof(u64);
4662 		u8 struct_v = 0;
4663 
4664 encode_again:
4665 		if (rec.v2.flock_len) {
4666 			ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
4667 		} else {
4668 			num_fcntl_locks = 0;
4669 			num_flock_locks = 0;
4670 		}
4671 		if (num_fcntl_locks + num_flock_locks > 0) {
4672 			flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
4673 					       sizeof(struct ceph_filelock),
4674 					       GFP_NOFS);
4675 			if (!flocks) {
4676 				err = -ENOMEM;
4677 				goto out_err;
4678 			}
4679 			err = ceph_encode_locks_to_buffer(inode, flocks,
4680 							  num_fcntl_locks,
4681 							  num_flock_locks);
4682 			if (err) {
4683 				kfree(flocks);
4684 				flocks = NULL;
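				/* more locks appeared while encoding;
				 * recount and retry */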
4685 				if (err == -ENOSPC)
4686 					goto encode_again;
4687 				goto out_err;
4688 			}
4689 		} else {
4690 			kfree(flocks);
4691 			flocks = NULL;
4692 		}
4693 
4694 		if (recon_state->msg_version >= 3) {
4695 			/* version, compat_version and struct_len */
4696 			total_len += 2 * sizeof(u8) + sizeof(u32);
4697 			struct_v = 2;
4698 		}
4699 		/*
4700 		 * number of encoded locks is stable, so copy to pagelist
4701 		 */
4702 		struct_len = 2 * sizeof(u32) +
4703 			    (num_fcntl_locks + num_flock_locks) *
4704 			    sizeof(struct ceph_filelock);
4705 		rec.v2.flock_len = cpu_to_le32(struct_len);
4706 
4707 		struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
4708 
4709 		if (struct_v >= 2)
4710 			struct_len += sizeof(u64); /* snap_follows */
4711 
4712 		total_len += struct_len;
4713 
4714 		if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
4715 			err = send_reconnect_partial(recon_state);
4716 			if (err)
4717 				goto out_freeflocks;
4718 			pagelist = recon_state->pagelist;
4719 		}
4720 
4721 		err = ceph_pagelist_reserve(pagelist, total_len);
4722 		if (err)
4723 			goto out_freeflocks;
4724 
4725 		ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
4726 		if (recon_state->msg_version >= 3) {
4727 			ceph_pagelist_encode_8(pagelist, struct_v);
4728 			ceph_pagelist_encode_8(pagelist, 1);
4729 			ceph_pagelist_encode_32(pagelist, struct_len);
4730 		}
4731 		ceph_pagelist_encode_string(pagelist, path, pathlen);
4732 		ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
4733 		ceph_locks_to_pagelist(flocks, pagelist,
4734 				       num_fcntl_locks, num_flock_locks);
4735 		if (struct_v >= 2)
4736 			ceph_pagelist_encode_64(pagelist, snap_follows);
4737 out_freeflocks:
4738 		kfree(flocks);
4739 	} else {
4740 		err = ceph_pagelist_reserve(pagelist,
4741 					    sizeof(u64) + sizeof(u32) +
4742 					    pathlen + sizeof(rec.v1));
4743 		if (err)
4744 			goto out_err;
4745 
4746 		ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
4747 		ceph_pagelist_encode_string(pagelist, path, pathlen);
4748 		ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
4749 	}
4750 
4751 out_err:
4752 	ceph_mdsc_free_path(path, pathlen);
4753 	if (!err)
4754 		recon_state->nr_caps++;
4755 	return err;
4756 }
4757 
4758 static int encode_snap_realms(struct ceph_mds_client *mdsc,
4759 			      struct ceph_reconnect_state *recon_state)
4760 {
4761 	struct rb_node *p;
4762 	struct ceph_pagelist *pagelist = recon_state->pagelist;
4763 	struct ceph_client *cl = mdsc->fsc->client;
4764 	int err = 0;
4765 
4766 	if (recon_state->msg_version >= 4) {
4767 		err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
4768 		if (err < 0)
4769 			goto fail;
4770 	}
4771 
4772 	/*
4773 	 * snaprealms.  we provide mds with the ino, seq (version), and
4774 	 * parent for all of our realms.  If the mds has any newer info,
4775 	 * it will tell us.
4776 	 */
4777 	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
4778 		struct ceph_snap_realm *realm =
4779 		       rb_entry(p, struct ceph_snap_realm, node);
4780 		struct ceph_mds_snaprealm_reconnect sr_rec;
4781 
4782 		if (recon_state->msg_version >= 4) {
4783 			size_t need = sizeof(u8) * 2 + sizeof(u32) +
4784 				      sizeof(sr_rec);
4785 
4786 			if (pagelist->length + need > RECONNECT_MAX_SIZE) {
4787 				err = send_reconnect_partial(recon_state);
4788 				if (err)
4789 					goto fail;
4790 				pagelist = recon_state->pagelist;
4791 			}
4792 
4793 			err = ceph_pagelist_reserve(pagelist, need);
4794 			if (err)
4795 				goto fail;
4796 
4797 			ceph_pagelist_encode_8(pagelist, 1);
4798 			ceph_pagelist_encode_8(pagelist, 1);
4799 			ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
4800 		}
4801 
4802 		doutc(cl, " adding snap realm %llx seq %lld parent %llx\n",
4803 		      realm->ino, realm->seq, realm->parent_ino);
4804 		sr_rec.ino = cpu_to_le64(realm->ino);
4805 		sr_rec.seq = cpu_to_le64(realm->seq);
4806 		sr_rec.parent = cpu_to_le64(realm->parent_ino);
4807 
4808 		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
4809 		if (err)
4810 			goto fail;
4811 
4812 		recon_state->nr_realms++;
4813 	}
4814 fail:
4815 	return err;
4816 }
4817 
4818 
4819 /*
4820  * If an MDS fails and recovers, clients need to reconnect in order to
4821  * reestablish shared state.  This includes all caps issued through
4822  * this session _and_ the snap_realm hierarchy.  Because it's not
4823  * clear which snap realms the mds cares about, we send everything we
4824  * know about, which ensures we'll then get any new info the
4825  * recovering MDS might have.
4826  *
4827  * This is a relatively heavyweight operation, but it's rare.
4828  */
4829 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
4830 			       struct ceph_mds_session *session)
4831 {
4832 	struct ceph_client *cl = mdsc->fsc->client;
4833 	struct ceph_msg *reply;
4834 	int mds = session->s_mds;
4835 	int err = -ENOMEM;
4836 	struct ceph_reconnect_state recon_state = {
4837 		.session = session,
4838 	};
4839 	LIST_HEAD(dispose);
4840 
4841 	pr_info_client(cl, "mds%d reconnect start\n", mds);
4842 
4843 	recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
4844 	if (!recon_state.pagelist)
4845 		goto fail_nopagelist;
4846 
4847 	reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
4848 	if (!reply)
4849 		goto fail_nomsg;
4850 
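	/* drop any delegated inode numbers; they are tied to the previous
	 * session instance */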
4851 	xa_destroy(&session->s_delegated_inos);
4852 
4853 	mutex_lock(&session->s_mutex);
4854 	session->s_state = CEPH_MDS_SESSION_RECONNECTING;
4855 	session->s_seq = 0;
4856 
4857 	doutc(cl, "session %p state %s\n", session,
4858 	      ceph_session_state_name(session->s_state));
4859 
4860 	atomic_inc(&session->s_cap_gen);
4861 
4862 	spin_lock(&session->s_cap_lock);
4863 	/* don't know if session is readonly */
4864 	session->s_readonly = 0;
4865 	/*
4866 	 * notify __ceph_remove_cap() that we are composing cap reconnect.
4867 	 * If a cap gets released before being added to the cap reconnect,
4868 	 * __ceph_remove_cap() should skip queuing the cap release.
4869 	 */
4870 	session->s_cap_reconnect = 1;
4871 	/* drop old cap expires; we're about to reestablish that state */
4872 	detach_cap_releases(session, &dispose);
4873 	spin_unlock(&session->s_cap_lock);
4874 	dispose_cap_releases(mdsc, &dispose);
4875 
4876 	/* trim unused caps to reduce MDS's cache rejoin time */
4877 	if (mdsc->fsc->sb->s_root)
4878 		shrink_dcache_parent(mdsc->fsc->sb->s_root);
4879 
4880 	ceph_con_close(&session->s_con);
4881 	ceph_con_open(&session->s_con,
4882 		      CEPH_ENTITY_TYPE_MDS, mds,
4883 		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
4884 
4885 	/* replay unsafe requests */
4886 	replay_unsafe_requests(mdsc, session);
4887 
4888 	ceph_early_kick_flushing_caps(mdsc, session);
4889 
4890 	down_read(&mdsc->snap_rwsem);
4891 
4892 	/* placeholder for nr_caps */
4893 	err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
4894 	if (err)
4895 		goto fail;
4896 
4897 	if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
4898 		recon_state.msg_version = 3;
4899 		recon_state.allow_multi = true;
4900 	} else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
4901 		recon_state.msg_version = 3;
4902 	} else {
4903 		recon_state.msg_version = 2;
4904 	}
4905 	/* traverse this session's caps */
4906 	err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);
4907 
4908 	spin_lock(&session->s_cap_lock);
4909 	session->s_cap_reconnect = 0;
4910 	spin_unlock(&session->s_cap_lock);
4911 
4912 	if (err < 0)
4913 		goto fail;
4914 
4915 	/* check if all realms can be encoded into the current message */
4916 	if (mdsc->num_snap_realms) {
4917 		size_t total_len =
4918 			recon_state.pagelist->length +
4919 			mdsc->num_snap_realms *
4920 			sizeof(struct ceph_mds_snaprealm_reconnect);
4921 		if (recon_state.msg_version >= 4) {
4922 			/* number of realms */
4923 			total_len += sizeof(u32);
4924 			/* version, compat_version and struct_len */
4925 			total_len += mdsc->num_snap_realms *
4926 				     (2 * sizeof(u8) + sizeof(u32));
4927 		}
4928 		if (total_len > RECONNECT_MAX_SIZE) {
4929 			if (!recon_state.allow_multi) {
4930 				err = -ENOSPC;
4931 				goto fail;
4932 			}
4933 			if (recon_state.nr_caps) {
4934 				err = send_reconnect_partial(&recon_state);
4935 				if (err)
4936 					goto fail;
4937 			}
4938 			recon_state.msg_version = 5;
4939 		}
4940 	}
4941 
4942 	err = encode_snap_realms(mdsc, &recon_state);
4943 	if (err < 0)
4944 		goto fail;
4945 
4946 	if (recon_state.msg_version >= 5) {
4947 		err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
4948 		if (err < 0)
4949 			goto fail;
4950 	}
4951 
4952 	if (recon_state.nr_caps || recon_state.nr_realms) {
4953 		struct page *page =
4954 			list_first_entry(&recon_state.pagelist->head,
4955 					struct page, lru);
4956 		__le32 *addr = kmap_atomic(page);
4957 		if (recon_state.nr_caps) {
4958 			WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
4959 			*addr = cpu_to_le32(recon_state.nr_caps);
4960 		} else if (recon_state.msg_version >= 4) {
4961 			*(addr + 1) = cpu_to_le32(recon_state.nr_realms);
4962 		}
4963 		kunmap_atomic(addr);
4964 	}
4965 
4966 	reply->hdr.version = cpu_to_le16(recon_state.msg_version);
4967 	if (recon_state.msg_version >= 4)
4968 		reply->hdr.compat_version = cpu_to_le16(4);
4969 
4970 	reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
4971 	ceph_msg_data_add_pagelist(reply, recon_state.pagelist);
4972 
4973 	ceph_con_send(&session->s_con, reply);
4974 
4975 	mutex_unlock(&session->s_mutex);
4976 
4977 	mutex_lock(&mdsc->mutex);
4978 	__wake_requests(mdsc, &session->s_waiting);
4979 	mutex_unlock(&mdsc->mutex);
4980 
4981 	up_read(&mdsc->snap_rwsem);
4982 	ceph_pagelist_release(recon_state.pagelist);
4983 	return;
4984 
4985 fail:
4986 	ceph_msg_put(reply);
4987 	up_read(&mdsc->snap_rwsem);
4988 	mutex_unlock(&session->s_mutex);
4989 fail_nomsg:
4990 	ceph_pagelist_release(recon_state.pagelist);
4991 fail_nopagelist:
4992 	pr_err_client(cl, "error %d preparing reconnect for mds%d\n",
4993 		      err, mds);
4994 	return;
4995 }
4996 
4997 
4998 /*
4999  * compare old and new mdsmaps, kicking requests
5000  * and closing out old connections as necessary
5001  *
5002  * called under mdsc->mutex.
5003  */
5004 static void check_new_map(struct ceph_mds_client *mdsc,
5005 			  struct ceph_mdsmap *newmap,
5006 			  struct ceph_mdsmap *oldmap)
5007 {
5008 	int i, j, err;
5009 	int oldstate, newstate;
5010 	struct ceph_mds_session *s;
5011 	unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, sizeof(unsigned long))] = {0};
5012 	struct ceph_client *cl = mdsc->fsc->client;
5013 
5014 	doutc(cl, "new %u old %u\n", newmap->m_epoch, oldmap->m_epoch);
5015 
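	/* gather every rank that appears as an export target in the new map */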
5016 	if (newmap->m_info) {
5017 		for (i = 0; i < newmap->possible_max_rank; i++) {
5018 			for (j = 0; j < newmap->m_info[i].num_export_targets; j++)
5019 				set_bit(newmap->m_info[i].export_targets[j], targets);
5020 		}
5021 	}
5022 
5023 	for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
5024 		if (!mdsc->sessions[i])
5025 			continue;
5026 		s = mdsc->sessions[i];
5027 		oldstate = ceph_mdsmap_get_state(oldmap, i);
5028 		newstate = ceph_mdsmap_get_state(newmap, i);
5029 
5030 		doutc(cl, "mds%d state %s%s -> %s%s (session %s)\n",
5031 		      i, ceph_mds_state_name(oldstate),
5032 		      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
5033 		      ceph_mds_state_name(newstate),
5034 		      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
5035 		      ceph_session_state_name(s->s_state));
5036 
5037 		if (i >= newmap->possible_max_rank) {
5038 			/* force close session for stopped mds */
5039 			ceph_get_mds_session(s);
5040 			__unregister_session(mdsc, s);
5041 			__wake_requests(mdsc, &s->s_waiting);
5042 			mutex_unlock(&mdsc->mutex);
5043 
5044 			mutex_lock(&s->s_mutex);
5045 			cleanup_session_requests(mdsc, s);
5046 			remove_session_caps(s);
5047 			mutex_unlock(&s->s_mutex);
5048 
5049 			ceph_put_mds_session(s);
5050 
5051 			mutex_lock(&mdsc->mutex);
5052 			kick_requests(mdsc, i);
5053 			continue;
5054 		}
5055 
5056 		if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
5057 			   ceph_mdsmap_get_addr(newmap, i),
5058 			   sizeof(struct ceph_entity_addr))) {
5059 			/* just close it */
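			/*
			 * Lock ordering is s_mutex before mdsc->mutex, so
			 * drop mdsc->mutex and retake it around acquiring
			 * s_mutex.
			 */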
5060 			mutex_unlock(&mdsc->mutex);
5061 			mutex_lock(&s->s_mutex);
5062 			mutex_lock(&mdsc->mutex);
5063 			ceph_con_close(&s->s_con);
5064 			mutex_unlock(&s->s_mutex);
5065 			s->s_state = CEPH_MDS_SESSION_RESTARTING;
5066 		} else if (oldstate == newstate) {
5067 			continue;  /* nothing new with this mds */
5068 		}
5069 
5070 		/*
5071 		 * send reconnect?
5072 		 */
5073 		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
5074 		    newstate >= CEPH_MDS_STATE_RECONNECT) {
5075 			mutex_unlock(&mdsc->mutex);
5076 			clear_bit(i, targets);
5077 			send_mds_reconnect(mdsc, s);
5078 			mutex_lock(&mdsc->mutex);
5079 		}
5080 
5081 		/*
5082 		 * kick request on any mds that has gone active.
5083 		 */
5084 		if (oldstate < CEPH_MDS_STATE_ACTIVE &&
5085 		    newstate >= CEPH_MDS_STATE_ACTIVE) {
5086 			if (oldstate != CEPH_MDS_STATE_CREATING &&
5087 			    oldstate != CEPH_MDS_STATE_STARTING)
5088 				pr_info_client(cl, "mds%d recovery completed\n",
5089 					       s->s_mds);
5090 			kick_requests(mdsc, i);
5091 			mutex_unlock(&mdsc->mutex);
5092 			mutex_lock(&s->s_mutex);
5093 			mutex_lock(&mdsc->mutex);
5094 			ceph_kick_flushing_caps(mdsc, s);
5095 			mutex_unlock(&s->s_mutex);
5096 			wake_up_session_caps(s, RECONNECT);
5097 		}
5098 	}
5099 
5100 	/*
5101 	 * Only open and reconnect sessions that don't exist yet.
5102 	 */
5103 	for (i = 0; i < newmap->possible_max_rank; i++) {
5104 		/*
5105 		 * If the importing MDS crashes just after the
5106 		 * EImportStart journal is flushed, then when a
5107 		 * standby MDS takes over and replays the
5108 		 * EImportStart journal, the new MDS daemon will
5109 		 * wait for the client to reconnect, but the client
5110 		 * may never have registered/opened the session.
5111 		 *
5112 		 * Try to reconnect to that MDS daemon if its rank
5113 		 * number is in the export targets array and it is
5114 		 * in the up:reconnect state.
5115 		 */
5116 		newstate = ceph_mdsmap_get_state(newmap, i);
5117 		if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT)
5118 			continue;
5119 
5120 		/*
5121 		 * In rare cases the session may already have been
5122 		 * registered and opened by requests that chose random
5123 		 * MDSes during the mdsc->mutex unlock/lock gap below.
5124 		 * But the related MDS daemon will just queue those
5125 		 * requests and keep waiting for the client's
5126 		 * reconnection request while in the up:reconnect state.
5127 		 */
5128 		s = __ceph_lookup_mds_session(mdsc, i);
5129 		if (likely(!s)) {
5130 			s = __open_export_target_session(mdsc, i);
5131 			if (IS_ERR(s)) {
5132 				err = PTR_ERR(s);
5133 				pr_err_client(cl,
5134 					      "failed to open export target session, err %d\n",
5135 					      err);
5136 				continue;
5137 			}
5138 		}
5139 		doutc(cl, "send reconnect to export target mds.%d\n", i);
5140 		mutex_unlock(&mdsc->mutex);
5141 		send_mds_reconnect(mdsc, s);
5142 		ceph_put_mds_session(s);
5143 		mutex_lock(&mdsc->mutex);
5144 	}
5145 
5146 	for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
5147 		s = mdsc->sessions[i];
5148 		if (!s)
5149 			continue;
5150 		if (!ceph_mdsmap_is_laggy(newmap, i))
5151 			continue;
5152 		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
5153 		    s->s_state == CEPH_MDS_SESSION_HUNG ||
5154 		    s->s_state == CEPH_MDS_SESSION_CLOSING) {
5155 			doutc(cl, " connecting to export targets of laggy mds%d\n", i);
5156 			__open_export_target_sessions(mdsc, s);
5157 		}
5158 	}
5159 }
5160 
5161 
5162 
5163 /*
5164  * leases
5165  */
5166 
5167 /*
5168  * caller must hold session s_mutex, dentry->d_lock
5169  */
5170 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
5171 {
5172 	struct ceph_dentry_info *di = ceph_dentry(dentry);
5173 
5174 	ceph_put_mds_session(di->lease_session);
5175 	di->lease_session = NULL;
5176 }
5177 
5178 static void handle_lease(struct ceph_mds_client *mdsc,
5179 			 struct ceph_mds_session *session,
5180 			 struct ceph_msg *msg)
5181 {
5182 	struct ceph_client *cl = mdsc->fsc->client;
5183 	struct super_block *sb = mdsc->fsc->sb;
5184 	struct inode *inode;
5185 	struct dentry *parent, *dentry;
5186 	struct ceph_dentry_info *di;
5187 	int mds = session->s_mds;
5188 	struct ceph_mds_lease *h = msg->front.iov_base;
5189 	u32 seq;
5190 	struct ceph_vino vino;
5191 	struct qstr dname;
5192 	int release = 0;
5193 
5194 	doutc(cl, "from mds%d\n", mds);
5195 
5196 	if (!ceph_inc_mds_stopping_blocker(mdsc, session))
5197 		return;
5198 
5199 	/* decode */
5200 	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
5201 		goto bad;
5202 	vino.ino = le64_to_cpu(h->ino);
5203 	vino.snap = CEPH_NOSNAP;
5204 	seq = le32_to_cpu(h->seq);
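	/* the dentry name follows the lease header: a __le32 length, then
	 * the name bytes */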
5205 	dname.len = get_unaligned_le32(h + 1);
5206 	if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
5207 		goto bad;
5208 	dname.name = (void *)(h + 1) + sizeof(u32);
5209 
5210 	/* lookup inode */
5211 	inode = ceph_find_inode(sb, vino);
5212 	doutc(cl, "%s, ino %llx %p %.*s\n", ceph_lease_op_name(h->action),
5213 	      vino.ino, inode, dname.len, dname.name);
5214 
5215 	mutex_lock(&session->s_mutex);
5216 	if (!inode) {
5217 		doutc(cl, "no inode %llx\n", vino.ino);
5218 		goto release;
5219 	}
5220 
5221 	/* dentry */
5222 	parent = d_find_alias(inode);
5223 	if (!parent) {
5224 		doutc(cl, "no parent dentry on inode %p\n", inode);
5225 		WARN_ON(1);
5226 		goto release;  /* hrm... */
5227 	}
5228 	dname.hash = full_name_hash(parent, dname.name, dname.len);
5229 	dentry = d_lookup(parent, &dname);
5230 	dput(parent);
5231 	if (!dentry)
5232 		goto release;
5233 
5234 	spin_lock(&dentry->d_lock);
5235 	di = ceph_dentry(dentry);
5236 	switch (h->action) {
5237 	case CEPH_MDS_LEASE_REVOKE:
5238 		if (di->lease_session == session) {
5239 			if (ceph_seq_cmp(di->lease_seq, seq) > 0)
5240 				h->seq = cpu_to_le32(di->lease_seq);
5241 			__ceph_mdsc_drop_dentry_lease(dentry);
5242 		}
5243 		release = 1;
5244 		break;
5245 
5246 	case CEPH_MDS_LEASE_RENEW:
5247 		if (di->lease_session == session &&
5248 		    di->lease_gen == atomic_read(&session->s_cap_gen) &&
5249 		    di->lease_renew_from &&
5250 		    di->lease_renew_after == 0) {
5251 			unsigned long duration =
5252 				msecs_to_jiffies(le32_to_cpu(h->duration_ms));
5253 
5254 			di->lease_seq = seq;
5255 			di->time = di->lease_renew_from + duration;
5256 			di->lease_renew_after = di->lease_renew_from +
5257 				(duration >> 1);
5258 			di->lease_renew_from = 0;
5259 		}
5260 		break;
5261 	}
5262 	spin_unlock(&dentry->d_lock);
5263 	dput(dentry);
5264 
5265 	if (!release)
5266 		goto out;
5267 
5268 release:
5269 	/* let's just reuse the same message */
5270 	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
5271 	ceph_msg_get(msg);
5272 	ceph_con_send(&session->s_con, msg);
5273 
5274 out:
5275 	mutex_unlock(&session->s_mutex);
5276 	iput(inode);
5277 
5278 	ceph_dec_mds_stopping_blocker(mdsc);
5279 	return;
5280 
5281 bad:
5282 	ceph_dec_mds_stopping_blocker(mdsc);
5283 
5284 	pr_err_client(cl, "corrupt lease message\n");
5285 	ceph_msg_dump(msg);
5286 }
5287 
5288 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
5289 			      struct dentry *dentry, char action,
5290 			      u32 seq)
5291 {
5292 	struct ceph_client *cl = session->s_mdsc->fsc->client;
5293 	struct ceph_msg *msg;
5294 	struct ceph_mds_lease *lease;
5295 	struct inode *dir;
5296 	int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
5297 
5298 	doutc(cl, "identry %p %s to mds%d\n", dentry, ceph_lease_op_name(action),
5299 	      session->s_mds);
5300 
5301 	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
5302 	if (!msg)
5303 		return;
5304 	lease = msg->front.iov_base;
5305 	lease->action = action;
5306 	lease->seq = cpu_to_le32(seq);
5307 
5308 	spin_lock(&dentry->d_lock);
5309 	dir = d_inode(dentry->d_parent);
5310 	lease->ino = cpu_to_le64(ceph_ino(dir));
5311 	lease->first = lease->last = cpu_to_le64(ceph_snap(dir));
5312 
5313 	put_unaligned_le32(dentry->d_name.len, lease + 1);
5314 	memcpy((void *)(lease + 1) + 4,
5315 	       dentry->d_name.name, dentry->d_name.len);
5316 	spin_unlock(&dentry->d_lock);
5317 
5318 	ceph_con_send(&session->s_con, msg);
5319 }
5320 
5321 /*
5322  * Lock and unlock the session mutex to wait for ongoing session activity.
5323  */
5324 static void lock_unlock_session(struct ceph_mds_session *s)
5325 {
5326 	mutex_lock(&s->s_mutex);
5327 	mutex_unlock(&s->s_mutex);
5328 }
5329 
5330 static void maybe_recover_session(struct ceph_mds_client *mdsc)
5331 {
5332 	struct ceph_client *cl = mdsc->fsc->client;
5333 	struct ceph_fs_client *fsc = mdsc->fsc;
5334 
5335 	if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
5336 		return;
5337 
5338 	if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
5339 		return;
5340 
5341 	if (!READ_ONCE(fsc->blocklisted))
5342 		return;
5343 
5344 	pr_info_client(cl, "auto reconnect after blocklisted\n");
5345 	ceph_force_reconnect(fsc->sb);
5346 }
5347 
5348 bool check_session_state(struct ceph_mds_session *s)
5349 {
5350 	struct ceph_client *cl = s->s_mdsc->fsc->client;
5351 
5352 	switch (s->s_state) {
5353 	case CEPH_MDS_SESSION_OPEN:
5354 		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
5355 			s->s_state = CEPH_MDS_SESSION_HUNG;
5356 			pr_info_client(cl, "mds%d hung\n", s->s_mds);
5357 		}
5358 		break;
5359 	case CEPH_MDS_SESSION_CLOSING:
5360 	case CEPH_MDS_SESSION_NEW:
5361 	case CEPH_MDS_SESSION_RESTARTING:
5362 	case CEPH_MDS_SESSION_CLOSED:
5363 	case CEPH_MDS_SESSION_REJECTED:
5364 		return false;
5365 	}
5366 
5367 	return true;
5368 }
5369 
5370 /*
5371  * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
5372  * then we need to retransmit that request.
5373  */
5374 void inc_session_sequence(struct ceph_mds_session *s)
5375 {
5376 	struct ceph_client *cl = s->s_mdsc->fsc->client;
5377 
5378 	lockdep_assert_held(&s->s_mutex);
5379 
5380 	s->s_seq++;
5381 
5382 	if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
5383 		int ret;
5384 
5385 		doutc(cl, "resending session close request for mds%d\n", s->s_mds);
5386 		ret = request_close_session(s);
5387 		if (ret < 0)
5388 			pr_err_client(cl, "unable to close session to mds%d: %d\n",
5389 				      s->s_mds, ret);
5390 	}
5391 }
5392 
5393 /*
5394  * delayed work -- periodically trim expired leases, renew caps with mds.  If
5395  * the @delay parameter is set to 0 or if it's more than 5 secs, the default
5396  * workqueue delay value of 5 secs will be used.
5397  */
5398 static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
5399 {
5400 	unsigned long max_delay = HZ * 5;
5401 
5402 	/* 5 secs default delay */
5403 	if (!delay || (delay > max_delay))
5404 		delay = max_delay;
5405 	schedule_delayed_work(&mdsc->delayed_work,
5406 			      round_jiffies_relative(delay));
5407 }
5408 
5409 static void delayed_work(struct work_struct *work)
5410 {
5411 	struct ceph_mds_client *mdsc =
5412 		container_of(work, struct ceph_mds_client, delayed_work.work);
5413 	unsigned long delay;
5414 	int renew_interval;
5415 	int renew_caps;
5416 	int i;
5417 
5418 	doutc(mdsc->fsc->client, "mdsc delayed_work\n");
5419 
5420 	if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED)
5421 		return;
5422 
5423 	mutex_lock(&mdsc->mutex);
5424 	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
5425 	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
5426 				   mdsc->last_renew_caps);
5427 	if (renew_caps)
5428 		mdsc->last_renew_caps = jiffies;
5429 
5430 	for (i = 0; i < mdsc->max_sessions; i++) {
5431 		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
5432 		if (!s)
5433 			continue;
5434 
5435 		if (!check_session_state(s)) {
5436 			ceph_put_mds_session(s);
5437 			continue;
5438 		}
5439 		mutex_unlock(&mdsc->mutex);
5440 
5441 		ceph_flush_session_cap_releases(mdsc, s);
5442 
5443 		mutex_lock(&s->s_mutex);
5444 		if (renew_caps)
5445 			send_renew_caps(mdsc, s);
5446 		else
5447 			ceph_con_keepalive(&s->s_con);
5448 		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
5449 		    s->s_state == CEPH_MDS_SESSION_HUNG)
5450 			ceph_send_cap_releases(mdsc, s);
5451 		mutex_unlock(&s->s_mutex);
5452 		ceph_put_mds_session(s);
5453 
5454 		mutex_lock(&mdsc->mutex);
5455 	}
5456 	mutex_unlock(&mdsc->mutex);
5457 
5458 	delay = ceph_check_delayed_caps(mdsc);
5459 
5460 	ceph_queue_cap_reclaim_work(mdsc);
5461 
5462 	ceph_trim_snapid_map(mdsc);
5463 
5464 	maybe_recover_session(mdsc);
5465 
5466 	schedule_delayed(mdsc, delay);
5467 }
5468 
5469 int ceph_mdsc_init(struct ceph_fs_client *fsc)
5470 {
5472 	struct ceph_mds_client *mdsc;
5473 	int err;
5474 
5475 	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
5476 	if (!mdsc)
5477 		return -ENOMEM;
5478 	mdsc->fsc = fsc;
5479 	mutex_init(&mdsc->mutex);
5480 	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
5481 	if (!mdsc->mdsmap) {
5482 		err = -ENOMEM;
5483 		goto err_mdsc;
5484 	}
5485 
5486 	init_completion(&mdsc->safe_umount_waiters);
5487 	spin_lock_init(&mdsc->stopping_lock);
5488 	atomic_set(&mdsc->stopping_blockers, 0);
5489 	init_completion(&mdsc->stopping_waiter);
5490 	init_waitqueue_head(&mdsc->session_close_wq);
5491 	INIT_LIST_HEAD(&mdsc->waiting_for_map);
5492 	mdsc->quotarealms_inodes = RB_ROOT;
5493 	mutex_init(&mdsc->quotarealms_inodes_mutex);
5494 	init_rwsem(&mdsc->snap_rwsem);
5495 	mdsc->snap_realms = RB_ROOT;
5496 	INIT_LIST_HEAD(&mdsc->snap_empty);
5497 	spin_lock_init(&mdsc->snap_empty_lock);
5498 	mdsc->request_tree = RB_ROOT;
5499 	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
5500 	mdsc->last_renew_caps = jiffies;
5501 	INIT_LIST_HEAD(&mdsc->cap_delay_list);
5502 #ifdef CONFIG_DEBUG_FS
5503 	INIT_LIST_HEAD(&mdsc->cap_wait_list);
5504 #endif
5505 	spin_lock_init(&mdsc->cap_delay_lock);
5506 	INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
5507 	INIT_LIST_HEAD(&mdsc->snap_flush_list);
5508 	spin_lock_init(&mdsc->snap_flush_lock);
5509 	mdsc->last_cap_flush_tid = 1;
5510 	INIT_LIST_HEAD(&mdsc->cap_flush_list);
5511 	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
5512 	spin_lock_init(&mdsc->cap_dirty_lock);
5513 	init_waitqueue_head(&mdsc->cap_flushing_wq);
5514 	INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
5515 	INIT_WORK(&mdsc->cap_unlink_work, ceph_cap_unlink_work);
5516 	err = ceph_metric_init(&mdsc->metric);
5517 	if (err)
5518 		goto err_mdsmap;
5519 
5520 	spin_lock_init(&mdsc->dentry_list_lock);
5521 	INIT_LIST_HEAD(&mdsc->dentry_leases);
5522 	INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
5523 
5524 	ceph_caps_init(mdsc);
5525 	ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
5526 
5527 	spin_lock_init(&mdsc->snapid_map_lock);
5528 	mdsc->snapid_map_tree = RB_ROOT;
5529 	INIT_LIST_HEAD(&mdsc->snapid_map_lru);
5530 
5531 	init_rwsem(&mdsc->pool_perm_rwsem);
5532 	mdsc->pool_perm_tree = RB_ROOT;
5533 
5534 	strscpy(mdsc->nodename, utsname()->nodename,
5535 		sizeof(mdsc->nodename));
5536 
5537 	fsc->mdsc = mdsc;
5538 	return 0;
5539 
5540 err_mdsmap:
5541 	kfree(mdsc->mdsmap);
5542 err_mdsc:
5543 	kfree(mdsc);
5544 	return err;
5545 }
5546 
5547 /*
5548  * Wait for safe replies on open mds requests.  If we time out, drop
5549  * all requests from the tree to avoid dangling dentry refs.
5550  */
5551 static void wait_requests(struct ceph_mds_client *mdsc)
5552 {
5553 	struct ceph_client *cl = mdsc->fsc->client;
5554 	struct ceph_options *opts = mdsc->fsc->client->options;
5555 	struct ceph_mds_request *req;
5556 
5557 	mutex_lock(&mdsc->mutex);
5558 	if (__get_oldest_req(mdsc)) {
5559 		mutex_unlock(&mdsc->mutex);
5560 
5561 		doutc(cl, "waiting for requests\n");
5562 		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
5563 				    ceph_timeout_jiffies(opts->mount_timeout));
5564 
5565 		/* tear down remaining requests */
5566 		mutex_lock(&mdsc->mutex);
5567 		while ((req = __get_oldest_req(mdsc))) {
5568 			doutc(cl, "timed out on tid %llu\n", req->r_tid);
5569 			list_del_init(&req->r_wait);
5570 			__unregister_request(mdsc, req);
5571 		}
5572 	}
5573 	mutex_unlock(&mdsc->mutex);
5574 	doutc(cl, "done\n");
5575 }
5576 
5577 void send_flush_mdlog(struct ceph_mds_session *s)
5578 {
5579 	struct ceph_client *cl = s->s_mdsc->fsc->client;
5580 	struct ceph_msg *msg;
5581 
5582 	/*
5583 	 * Pre-luminous MDS crashes when it sees an unknown session request
5584 	 */
5585 	if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS))
5586 		return;
5587 
5588 	mutex_lock(&s->s_mutex);
5589 	doutc(cl, "request mdlog flush to mds%d (%s) seq %lld\n",
5590 	      s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
5591 	msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
5592 				      s->s_seq);
5593 	if (!msg) {
5594 		pr_err_client(cl, "failed to request mdlog flush to mds%d (%s) seq %lld\n",
5595 			      s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
5596 	} else {
5597 		ceph_con_send(&s->s_con, msg);
5598 	}
5599 	mutex_unlock(&s->s_mutex);
5600 }
5601 
5602 static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
5603 			       struct ceph_mds_cap_auth *auth,
5604 			       const struct cred *cred,
5605 			       char *tpath)
5606 {
5607 	u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid);
5608 	u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid);
5609 	struct ceph_client *cl = mdsc->fsc->client;
5610 	const char *spath = mdsc->fsc->mount_options->server_path;
5611 	bool gid_matched = false;
5612 	u32 gid, tlen, len;
5613 	int i, j;
5614 
5615 	doutc(cl, "match.uid %lld\n", auth->match.uid);
5616 	if (auth->match.uid != MDS_AUTH_UID_ANY) {
5617 		if (auth->match.uid != caller_uid)
5618 			return 0;
5619 		if (auth->match.num_gids) {
5620 			for (i = 0; i < auth->match.num_gids; i++) {
5621 				if (caller_gid == auth->match.gids[i])
5622 					gid_matched = true;
5623 			}
5624 			if (!gid_matched && cred->group_info->ngroups) {
5625 				for (i = 0; i < cred->group_info->ngroups; i++) {
5626 					gid = from_kgid(&init_user_ns,
5627 							cred->group_info->gid[i]);
5628 					for (j = 0; j < auth->match.num_gids; j++) {
5629 						if (gid == auth->match.gids[j]) {
5630 							gid_matched = true;
5631 							break;
5632 						}
5633 					}
5634 					if (gid_matched)
5635 						break;
5636 				}
5637 			}
5638 			if (!gid_matched)
5639 				return 0;
5640 		}
5641 	}
5642 
5643 	/* path match */
5644 	if (auth->match.path) {
5645 		if (!tpath)
5646 			return 0;
5647 
5648 		tlen = strlen(tpath);
5649 		len = strlen(auth->match.path);
5650 		if (len) {
5651 			char *_tpath = tpath;
5652 			bool free_tpath = false;
5653 			int m, n;
5654 
5655 			doutc(cl, "server path %s, tpath %s, match.path %s\n",
5656 			      spath, tpath, auth->match.path);
5657 			if (spath && (m = strlen(spath)) != 1) {
5658 				/* mount path + '/' + tpath + a trailing NUL */
5659 				n = m + 1 + tlen + 1;
5660 				_tpath = kmalloc(n, GFP_NOFS);
5661 				if (!_tpath)
5662 					return -ENOMEM;
5663 				/* remove the leading '/' */
5664 				snprintf(_tpath, n, "%s/%s", spath + 1, tpath);
5665 				free_tpath = true;
5666 				tlen = strlen(_tpath);
5667 			}
5668 
5669 			/*
5670 			 * Please note the trailing '/' for match.path has already
5671 			 * been removed when parsing.
5672 			 *
5673 			 * Remove the trailing '/' from the target path.
5674 			 */
5675 			while (tlen && _tpath[tlen - 1] == '/') {
5676 				_tpath[tlen - 1] = '\0';
5677 				tlen -= 1;
5678 			}
5679 			doutc(cl, "_tpath %s\n", _tpath);
5680 
5681 			/*
5682 			 * In case first == _tpath && tlen == len:
5683 			 *  match.path=/foo  --> /foo _tpath=/foo    --> match
5684 			 *  match.path=/foo/ --> /foo _tpath=/foo    --> match
5685 			 *
5686 			 * In case first == _tpath && tlen > len:
5687 			 *  match.path=/foo/ --> /foo _tpath=/foo/   --> match
5688 			 *  match.path=/foo  --> /foo _tpath=/foo/   --> match
5689 			 *  match.path=/foo/ --> /foo _tpath=/foo/d  --> match
5690 			 *  match.path=/foo  --> /foo _tpath=/food   --> mismatch
5691 			 *
5692 			 * All the other cases                       --> mismatch
5693 			 */
5694 			char *first = strstr(_tpath, auth->match.path);
5695 			if (first != _tpath) {
5696 				if (free_tpath)
5697 					kfree(_tpath);
5698 				return 0;
5699 			}
5700 
5701 			if (tlen > len && _tpath[len] != '/') {
5702 				if (free_tpath)
5703 					kfree(_tpath);
5704 				return 0;
5705 			}
5706 		}
5707 	}
5708 
5709 	doutc(cl, "matched\n");
5710 	return 1;
5711 }
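
/*
 * Illustration of the normalization in ceph_mds_auth_match(), with a
 * made-up mount: given server_path "/mnt/cephfs" and tpath "dir/",
 * _tpath becomes "mnt/cephfs/dir" (the mount path's leading '/' and
 * any trailing '/'s stripped) before being prefix-matched against
 * auth->match.path.
 */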
5712 
5713 int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath, int mask)
5714 {
5715 	const struct cred *cred = get_current_cred();
5716 	u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid);
5717 	u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid);
5718 	struct ceph_mds_cap_auth *rw_perms_s = NULL;
5719 	struct ceph_client *cl = mdsc->fsc->client;
5720 	bool root_squash_perms = true;
5721 	int i, err;
5722 
5723 	doutc(cl, "tpath '%s', mask %d, caller_uid %d, caller_gid %d\n",
5724 	      tpath, mask, caller_uid, caller_gid);
5725 
5726 	for (i = 0; i < mdsc->s_cap_auths_num; i++) {
5727 		struct ceph_mds_cap_auth *s = &mdsc->s_cap_auths[i];
5728 
5729 		err = ceph_mds_auth_match(mdsc, s, cred, tpath);
5730 		if (err < 0) {
5731 			put_cred(cred);
5732 			return err;
5733 		} else if (err > 0) {
5734 			/* always follow the last auth caps' permission */
5735 			root_squash_perms = true;
5736 			rw_perms_s = NULL;
5737 			if ((mask & MAY_WRITE) && s->writeable &&
5738 			    s->match.root_squash && (!caller_uid || !caller_gid))
5739 				root_squash_perms = false;
5740 
5741 			if (((mask & MAY_WRITE) && !s->writeable) ||
5742 			    ((mask & MAY_READ) && !s->readable))
5743 				rw_perms_s = s;
5744 		}
5745 	}
5746 
5747 	put_cred(cred);
5748 
5749 	doutc(cl, "root_squash_perms %d, rw_perms_s %p\n", root_squash_perms,
5750 	      rw_perms_s);
5751 	if (root_squash_perms && rw_perms_s == NULL) {
5752 		doutc(cl, "access allowed\n");
5753 		return 0;
5754 	}
5755 
5756 	if (!root_squash_perms) {
5757 		doutc(cl, "root_squash is enabled and user(%d %d) isn't allowed to write\n",
5758 		      caller_uid, caller_gid);
5759 	}
5760 	if (rw_perms_s) {
5761 		doutc(cl, "mds auth caps readable/writeable %d/%d while request r/w %d/%d\n",
5762 		      rw_perms_s->readable, rw_perms_s->writeable,
5763 		      !!(mask & MAY_READ), !!(mask & MAY_WRITE));
5764 	}
5765 	doutc(cl, "access denied\n");
5766 	return -EACCES;
5767 }
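
/*
 * Sketch of a caller (hypothetical, for illustration only); a return
 * of 0 means the last matching MDS auth cap grants the requested
 * access, while -EACCES means it was denied:
 *
 *	err = ceph_mds_check_access(mdsc, path, MAY_WRITE);
 *	if (err < 0)
 *		return err;
 */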
5768 
5769 /*
5770  * called before the mount goes ro, and before dentries are torn down.
5771  * (hmm, does this still race with new lookups?)
5772  */
5773 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
5774 {
5775 	doutc(mdsc->fsc->client, "begin\n");
5776 	mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;
5777 
5778 	ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
5779 	ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
5780 	ceph_flush_dirty_caps(mdsc);
5781 	wait_requests(mdsc);
5782 
5783 	/*
5784 	 * wait for reply handlers to drop their request refs and
5785 	 * their inode/dcache refs
5786 	 */
5787 	ceph_msgr_flush();
5788 
5789 	ceph_cleanup_quotarealms_inodes(mdsc);
5790 	doutc(mdsc->fsc->client, "done\n");
5791 }
5792 
5793 /*
5794  * flush the mdlog and wait for all write mds requests to flush.
5795  */
5796 static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
5797 						 u64 want_tid)
5798 {
5799 	struct ceph_client *cl = mdsc->fsc->client;
5800 	struct ceph_mds_request *req = NULL, *nextreq;
5801 	struct ceph_mds_session *last_session = NULL;
5802 	struct rb_node *n;
5803 
5804 	mutex_lock(&mdsc->mutex);
5805 	doutc(cl, "want %lld\n", want_tid);
5806 restart:
5807 	req = __get_oldest_req(mdsc);
5808 	while (req && req->r_tid <= want_tid) {
5809 		/* find next request */
5810 		n = rb_next(&req->r_node);
5811 		if (n)
5812 			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
5813 		else
5814 			nextreq = NULL;
5815 		if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
5816 		    (req->r_op & CEPH_MDS_OP_WRITE)) {
5817 			struct ceph_mds_session *s = req->r_session;
5818 
5819 			if (!s) {
5820 				req = nextreq;
5821 				continue;
5822 			}
5823 
5824 			/* write op */
5825 			ceph_mdsc_get_request(req);
5826 			if (nextreq)
5827 				ceph_mdsc_get_request(nextreq);
5828 			s = ceph_get_mds_session(s);
5829 			mutex_unlock(&mdsc->mutex);
5830 
5831 			/* send flush mdlog request to MDS */
5832 			if (last_session != s) {
5833 				send_flush_mdlog(s);
5834 				ceph_put_mds_session(last_session);
5835 				last_session = s;
5836 			} else {
5837 				ceph_put_mds_session(s);
5838 			}
5839 			doutc(cl, "wait on %llu (want %llu)\n",
5840 			      req->r_tid, want_tid);
5841 			wait_for_completion(&req->r_safe_completion);
5842 
5843 			mutex_lock(&mdsc->mutex);
5844 			ceph_mdsc_put_request(req);
5845 				break;  /* next didn't exist before, so we're done! */
5846 				break;  /* next dne before, so we're done! */
5847 			if (RB_EMPTY_NODE(&nextreq->r_node)) {
5848 				/* next request was removed from tree */
5849 				ceph_mdsc_put_request(nextreq);
5850 				goto restart;
5851 			}
5852 			ceph_mdsc_put_request(nextreq);  /* won't go away */
5853 		}
5854 		req = nextreq;
5855 	}
5856 	mutex_unlock(&mdsc->mutex);
5857 	ceph_put_mds_session(last_session);
5858 	doutc(cl, "done\n");
5859 }
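
/*
 * Note on the walk above: references are taken on both req and nextreq
 * before mdsc->mutex is dropped, so neither request can be freed while
 * we sleep in wait_for_completion().  If nextreq was unregistered from
 * the tree in the meantime (RB_EMPTY_NODE), the scan restarts from the
 * oldest request.
 */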
5860 
5861 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
5862 {
5863 	struct ceph_client *cl = mdsc->fsc->client;
5864 	u64 want_tid, want_flush;
5865 
5866 	if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
5867 		return;
5868 
5869 	doutc(cl, "sync\n");
5870 	mutex_lock(&mdsc->mutex);
5871 	want_tid = mdsc->last_tid;
5872 	mutex_unlock(&mdsc->mutex);
5873 
5874 	ceph_flush_dirty_caps(mdsc);
5875 	ceph_flush_cap_releases(mdsc);
5876 	spin_lock(&mdsc->cap_dirty_lock);
5877 	want_flush = mdsc->last_cap_flush_tid;
5878 	if (!list_empty(&mdsc->cap_flush_list)) {
5879 		struct ceph_cap_flush *cf =
5880 			list_last_entry(&mdsc->cap_flush_list,
5881 					struct ceph_cap_flush, g_list);
5882 		cf->wake = true;
5883 	}
5884 	spin_unlock(&mdsc->cap_dirty_lock);
5885 
5886 	doutc(cl, "sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
5887 
5888 	flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid);
5889 	wait_caps_flush(mdsc, want_flush);
5890 }
5891 
5892 /*
5893  * true if all sessions are closed, or we force unmount
5894  */
5895 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
5896 {
5897 	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
5898 		return true;
5899 	return atomic_read(&mdsc->num_sessions) <= skipped;
5900 }
5901 
5902 /*
5903  * called after the sb is ro, or when metadata is corrupted.
5904  */
5905 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
5906 {
5907 	struct ceph_options *opts = mdsc->fsc->client->options;
5908 	struct ceph_client *cl = mdsc->fsc->client;
5909 	struct ceph_mds_session *session;
5910 	int i;
5911 	int skipped = 0;
5912 
5913 	doutc(cl, "begin\n");
5914 
5915 	/* close sessions */
5916 	mutex_lock(&mdsc->mutex);
5917 	for (i = 0; i < mdsc->max_sessions; i++) {
5918 		session = __ceph_lookup_mds_session(mdsc, i);
5919 		if (!session)
5920 			continue;
5921 		mutex_unlock(&mdsc->mutex);
5922 		mutex_lock(&session->s_mutex);
5923 		if (__close_session(mdsc, session) <= 0)
5924 			skipped++;
5925 		mutex_unlock(&session->s_mutex);
5926 		ceph_put_mds_session(session);
5927 		mutex_lock(&mdsc->mutex);
5928 	}
5929 	mutex_unlock(&mdsc->mutex);
5930 
5931 	doutc(cl, "waiting for sessions to close\n");
5932 	wait_event_timeout(mdsc->session_close_wq,
5933 			   done_closing_sessions(mdsc, skipped),
5934 			   ceph_timeout_jiffies(opts->mount_timeout));
5935 
5936 	/* tear down remaining sessions */
5937 	mutex_lock(&mdsc->mutex);
5938 	for (i = 0; i < mdsc->max_sessions; i++) {
5939 		if (mdsc->sessions[i]) {
5940 			session = ceph_get_mds_session(mdsc->sessions[i]);
5941 			__unregister_session(mdsc, session);
5942 			mutex_unlock(&mdsc->mutex);
5943 			mutex_lock(&session->s_mutex);
5944 			remove_session_caps(session);
5945 			mutex_unlock(&session->s_mutex);
5946 			ceph_put_mds_session(session);
5947 			mutex_lock(&mdsc->mutex);
5948 		}
5949 	}
5950 	WARN_ON(!list_empty(&mdsc->cap_delay_list));
5951 	mutex_unlock(&mdsc->mutex);
5952 
5953 	ceph_cleanup_snapid_map(mdsc);
5954 	ceph_cleanup_global_and_empty_realms(mdsc);
5955 
5956 	cancel_work_sync(&mdsc->cap_reclaim_work);
5957 	cancel_work_sync(&mdsc->cap_unlink_work);
5958 	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
5959 
5960 	doutc(cl, "done\n");
5961 }
5962 
5963 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
5964 {
5965 	struct ceph_mds_session *session;
5966 	int mds;
5967 
5968 	doutc(mdsc->fsc->client, "force umount\n");
5969 
5970 	mutex_lock(&mdsc->mutex);
5971 	for (mds = 0; mds < mdsc->max_sessions; mds++) {
5972 		session = __ceph_lookup_mds_session(mdsc, mds);
5973 		if (!session)
5974 			continue;
5975 
5976 		if (session->s_state == CEPH_MDS_SESSION_REJECTED)
5977 			__unregister_session(mdsc, session);
5978 		__wake_requests(mdsc, &session->s_waiting);
5979 		mutex_unlock(&mdsc->mutex);
5980 
5981 		mutex_lock(&session->s_mutex);
5982 		__close_session(mdsc, session);
5983 		if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
5984 			cleanup_session_requests(mdsc, session);
5985 			remove_session_caps(session);
5986 		}
5987 		mutex_unlock(&session->s_mutex);
5988 		ceph_put_mds_session(session);
5989 
5990 		mutex_lock(&mdsc->mutex);
5991 		kick_requests(mdsc, mds);
5992 	}
5993 	__wake_requests(mdsc, &mdsc->waiting_for_map);
5994 	mutex_unlock(&mdsc->mutex);
5995 }
5996 
5997 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
5998 {
5999 	doutc(mdsc->fsc->client, "stop\n");
6000 	/*
6001 	 * Make sure the delayed work has stopped before releasing
6002 	 * the resources.
6003 	 *
6004 	 * cancel_delayed_work_sync() would only guarantee that the
6005 	 * work finishes executing, and the delayed work could re-arm
6006 	 * itself again after that, so flush it instead.
6007 	 */
6008 	flush_delayed_work(&mdsc->delayed_work);
6009 
6010 	if (mdsc->mdsmap)
6011 		ceph_mdsmap_destroy(mdsc->mdsmap);
6012 	kfree(mdsc->sessions);
6013 	ceph_caps_finalize(mdsc);
6014 
6015 	if (mdsc->s_cap_auths) {
6016 		int i;
6017 
6018 		for (i = 0; i < mdsc->s_cap_auths_num; i++) {
6019 			kfree(mdsc->s_cap_auths[i].match.gids);
6020 			kfree(mdsc->s_cap_auths[i].match.path);
6021 			kfree(mdsc->s_cap_auths[i].match.fs_name);
6022 		}
6023 		kfree(mdsc->s_cap_auths);
6024 	}
6025 
6026 	ceph_pool_perm_destroy(mdsc);
6027 }
6028 
6029 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
6030 {
6031 	struct ceph_mds_client *mdsc = fsc->mdsc;
6032 	doutc(fsc->client, "%p\n", mdsc);
6033 
6034 	if (!mdsc)
6035 		return;
6036 
6037 	/* flush out any connection work with references to us */
6038 	ceph_msgr_flush();
6039 
6040 	ceph_mdsc_stop(mdsc);
6041 
6042 	ceph_metric_destroy(&mdsc->metric);
6043 
6044 	fsc->mdsc = NULL;
6045 	kfree(mdsc);
6046 	doutc(fsc->client, "%p done\n", mdsc);
6047 }
6048 
6049 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
6050 {
6051 	struct ceph_fs_client *fsc = mdsc->fsc;
6052 	struct ceph_client *cl = fsc->client;
6053 	const char *mds_namespace = fsc->mount_options->mds_namespace;
6054 	void *p = msg->front.iov_base;
6055 	void *end = p + msg->front.iov_len;
6056 	u32 epoch;
6057 	u32 num_fs;
6058 	u32 mount_fscid = (u32)-1;
6059 	int err = -EINVAL;
6060 
6061 	ceph_decode_need(&p, end, sizeof(u32), bad);
6062 	epoch = ceph_decode_32(&p);
6063 
6064 	doutc(cl, "epoch %u\n", epoch);
6065 
6066 	/* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */
6067 	ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad);
6068 
6069 	ceph_decode_32_safe(&p, end, num_fs, bad);
6070 	while (num_fs-- > 0) {
6071 		void *info_p, *info_end;
6072 		u32 info_len;
6073 		u32 fscid, namelen;
6074 
6075 		ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
6076 		p += 2;		/* info_v, info_cv */
6077 		info_len = ceph_decode_32(&p);
6078 		ceph_decode_need(&p, end, info_len, bad);
6079 		info_p = p;
6080 		info_end = p + info_len;
6081 		p = info_end;
6082 
6083 		ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
6084 		fscid = ceph_decode_32(&info_p);
6085 		namelen = ceph_decode_32(&info_p);
6086 		ceph_decode_need(&info_p, info_end, namelen, bad);
6087 
6088 		if (mds_namespace &&
6089 		    strlen(mds_namespace) == namelen &&
6090 		    !strncmp(mds_namespace, (char *)info_p, namelen)) {
6091 			mount_fscid = fscid;
6092 			break;
6093 		}
6094 	}
6095 
6096 	ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
6097 	if (mount_fscid != (u32)-1) {
6098 		fsc->client->monc.fs_cluster_id = mount_fscid;
6099 		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
6100 				   0, true);
6101 		ceph_monc_renew_subs(&fsc->client->monc);
6102 	} else {
6103 		err = -ENOENT;
6104 		goto err_out;
6105 	}
6106 	return;
6107 
6108 bad:
6109 	pr_err_client(cl, "error decoding fsmap %d. Shutting down mount.\n",
6110 		      err);
6111 	ceph_umount_begin(mdsc->fsc->sb);
6112 	ceph_msg_dump(msg);
6113 err_out:
6114 	mutex_lock(&mdsc->mutex);
6115 	mdsc->mdsmap_err = err;
6116 	__wake_requests(mdsc, &mdsc->waiting_for_map);
6117 	mutex_unlock(&mdsc->mutex);
6118 }
6119 
6120 /*
6121  * handle mds map update.
6122  */
6123 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
6124 {
6125 	struct ceph_client *cl = mdsc->fsc->client;
6126 	u32 epoch;
6127 	u32 maplen;
6128 	void *p = msg->front.iov_base;
6129 	void *end = p + msg->front.iov_len;
6130 	struct ceph_mdsmap *newmap, *oldmap;
6131 	struct ceph_fsid fsid;
6132 	int err = -EINVAL;
6133 
6134 	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
6135 	ceph_decode_copy(&p, &fsid, sizeof(fsid));
6136 	if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
6137 		return;
6138 	epoch = ceph_decode_32(&p);
6139 	maplen = ceph_decode_32(&p);
6140 	doutc(cl, "epoch %u len %d\n", epoch, (int)maplen);
6141 
6142 	/* do we need it? */
6143 	mutex_lock(&mdsc->mutex);
6144 	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
6145 		doutc(cl, "epoch %u <= our %u\n", epoch, mdsc->mdsmap->m_epoch);
6146 		mutex_unlock(&mdsc->mutex);
6147 		return;
6148 	}
6149 
6150 	newmap = ceph_mdsmap_decode(mdsc, &p, end, ceph_msgr2(mdsc->fsc->client));
6151 	if (IS_ERR(newmap)) {
6152 		err = PTR_ERR(newmap);
6153 		goto bad_unlock;
6154 	}
6155 
6156 	/* swap into place */
6157 	if (mdsc->mdsmap) {
6158 		oldmap = mdsc->mdsmap;
6159 		mdsc->mdsmap = newmap;
6160 		check_new_map(mdsc, newmap, oldmap);
6161 		ceph_mdsmap_destroy(oldmap);
6162 	} else {
6163 		mdsc->mdsmap = newmap;  /* first mds map */
6164 	}
6165 	mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
6166 					MAX_LFS_FILESIZE);
6167 
6168 	__wake_requests(mdsc, &mdsc->waiting_for_map);
6169 	ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
6170 			  mdsc->mdsmap->m_epoch);
6171 
6172 	mutex_unlock(&mdsc->mutex);
6173 	schedule_delayed(mdsc, 0);
6174 	return;
6175 
6176 bad_unlock:
6177 	mutex_unlock(&mdsc->mutex);
6178 bad:
6179 	pr_err_client(cl, "error decoding mdsmap %d. Shutting down mount.\n",
6180 		      err);
6181 	ceph_umount_begin(mdsc->fsc->sb);
6182 	ceph_msg_dump(msg);
6183 	return;
6184 }
6185 
6186 static struct ceph_connection *mds_get_con(struct ceph_connection *con)
6187 {
6188 	struct ceph_mds_session *s = con->private;
6189 
6190 	if (ceph_get_mds_session(s))
6191 		return con;
6192 	return NULL;
6193 }
6194 
6195 static void mds_put_con(struct ceph_connection *con)
6196 {
6197 	struct ceph_mds_session *s = con->private;
6198 
6199 	ceph_put_mds_session(s);
6200 }
6201 
6202 /*
6203  * if the client is unresponsive for long enough, the mds will kill
6204  * the session entirely.
6205  */
6206 static void mds_peer_reset(struct ceph_connection *con)
6207 {
6208 	struct ceph_mds_session *s = con->private;
6209 	struct ceph_mds_client *mdsc = s->s_mdsc;
6210 
6211 	pr_warn_client(mdsc->fsc->client, "mds%d closed our session\n",
6212 		       s->s_mds);
6213 	if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO &&
6214 	    ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) >= CEPH_MDS_STATE_RECONNECT)
6215 		send_mds_reconnect(mdsc, s);
6216 }
6217 
6218 static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
6219 {
6220 	struct ceph_mds_session *s = con->private;
6221 	struct ceph_mds_client *mdsc = s->s_mdsc;
6222 	struct ceph_client *cl = mdsc->fsc->client;
6223 	int type = le16_to_cpu(msg->hdr.type);
6224 
6225 	mutex_lock(&mdsc->mutex);
6226 	if (__verify_registered_session(mdsc, s) < 0) {
6227 		mutex_unlock(&mdsc->mutex);
6228 		goto out;
6229 	}
6230 	mutex_unlock(&mdsc->mutex);
6231 
6232 	switch (type) {
6233 	case CEPH_MSG_MDS_MAP:
6234 		ceph_mdsc_handle_mdsmap(mdsc, msg);
6235 		break;
6236 	case CEPH_MSG_FS_MAP_USER:
6237 		ceph_mdsc_handle_fsmap(mdsc, msg);
6238 		break;
6239 	case CEPH_MSG_CLIENT_SESSION:
6240 		handle_session(s, msg);
6241 		break;
6242 	case CEPH_MSG_CLIENT_REPLY:
6243 		handle_reply(s, msg);
6244 		break;
6245 	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
6246 		handle_forward(mdsc, s, msg);
6247 		break;
6248 	case CEPH_MSG_CLIENT_CAPS:
6249 		ceph_handle_caps(s, msg);
6250 		break;
6251 	case CEPH_MSG_CLIENT_SNAP:
6252 		ceph_handle_snap(mdsc, s, msg);
6253 		break;
6254 	case CEPH_MSG_CLIENT_LEASE:
6255 		handle_lease(mdsc, s, msg);
6256 		break;
6257 	case CEPH_MSG_CLIENT_QUOTA:
6258 		ceph_handle_quota(mdsc, s, msg);
6259 		break;
6260 
6261 	default:
6262 		pr_err_client(cl, "received unknown message type %d %s\n",
6263 			      type, ceph_msg_type_name(type));
6264 	}
6265 out:
6266 	ceph_msg_put(msg);
6267 }
6268 
6269 /*
6270  * authentication
6271  */
6272 
6273 /*
6274  * Note: returned pointer is the address of a structure that's
6275  * managed separately.  Caller must *not* attempt to free it.
6276  */
6277 static struct ceph_auth_handshake *
6278 mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
6279 {
6280 	struct ceph_mds_session *s = con->private;
6281 	struct ceph_mds_client *mdsc = s->s_mdsc;
6282 	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
6283 	struct ceph_auth_handshake *auth = &s->s_auth;
6284 	int ret;
6285 
6286 	ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
6287 					 force_new, proto, NULL, NULL);
6288 	if (ret)
6289 		return ERR_PTR(ret);
6290 
6291 	return auth;
6292 }
6293 
6294 static int mds_add_authorizer_challenge(struct ceph_connection *con,
6295 				    void *challenge_buf, int challenge_buf_len)
6296 {
6297 	struct ceph_mds_session *s = con->private;
6298 	struct ceph_mds_client *mdsc = s->s_mdsc;
6299 	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
6300 
6301 	return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
6302 					    challenge_buf, challenge_buf_len);
6303 }
6304 
6305 static int mds_verify_authorizer_reply(struct ceph_connection *con)
6306 {
6307 	struct ceph_mds_session *s = con->private;
6308 	struct ceph_mds_client *mdsc = s->s_mdsc;
6309 	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
6310 	struct ceph_auth_handshake *auth = &s->s_auth;
6311 
6312 	return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
6313 		auth->authorizer_reply_buf, auth->authorizer_reply_buf_len,
6314 		NULL, NULL, NULL, NULL);
6315 }
6316 
6317 static int mds_invalidate_authorizer(struct ceph_connection *con)
6318 {
6319 	struct ceph_mds_session *s = con->private;
6320 	struct ceph_mds_client *mdsc = s->s_mdsc;
6321 	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
6322 
6323 	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
6324 
6325 	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
6326 }
6327 
6328 static int mds_get_auth_request(struct ceph_connection *con,
6329 				void *buf, int *buf_len,
6330 				void **authorizer, int *authorizer_len)
6331 {
6332 	struct ceph_mds_session *s = con->private;
6333 	struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
6334 	struct ceph_auth_handshake *auth = &s->s_auth;
6335 	int ret;
6336 
6337 	ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
6338 				       buf, buf_len);
6339 	if (ret)
6340 		return ret;
6341 
6342 	*authorizer = auth->authorizer_buf;
6343 	*authorizer_len = auth->authorizer_buf_len;
6344 	return 0;
6345 }
6346 
6347 static int mds_handle_auth_reply_more(struct ceph_connection *con,
6348 				      void *reply, int reply_len,
6349 				      void *buf, int *buf_len,
6350 				      void **authorizer, int *authorizer_len)
6351 {
6352 	struct ceph_mds_session *s = con->private;
6353 	struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
6354 	struct ceph_auth_handshake *auth = &s->s_auth;
6355 	int ret;
6356 
6357 	ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len,
6358 					      buf, buf_len);
6359 	if (ret)
6360 		return ret;
6361 
6362 	*authorizer = auth->authorizer_buf;
6363 	*authorizer_len = auth->authorizer_buf_len;
6364 	return 0;
6365 }
6366 
6367 static int mds_handle_auth_done(struct ceph_connection *con,
6368 				u64 global_id, void *reply, int reply_len,
6369 				u8 *session_key, int *session_key_len,
6370 				u8 *con_secret, int *con_secret_len)
6371 {
6372 	struct ceph_mds_session *s = con->private;
6373 	struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
6374 	struct ceph_auth_handshake *auth = &s->s_auth;
6375 
6376 	return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len,
6377 					       session_key, session_key_len,
6378 					       con_secret, con_secret_len);
6379 }
6380 
6381 static int mds_handle_auth_bad_method(struct ceph_connection *con,
6382 				      int used_proto, int result,
6383 				      const int *allowed_protos, int proto_cnt,
6384 				      const int *allowed_modes, int mode_cnt)
6385 {
6386 	struct ceph_mds_session *s = con->private;
6387 	struct ceph_mon_client *monc = &s->s_mdsc->fsc->client->monc;
6388 	int ret;
6389 
6390 	if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_MDS,
6391 					    used_proto, result,
6392 					    allowed_protos, proto_cnt,
6393 					    allowed_modes, mode_cnt)) {
6394 		ret = ceph_monc_validate_auth(monc);
6395 		if (ret)
6396 			return ret;
6397 	}
6398 
6399 	return -EACCES;
6400 }
6401 
6402 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
6403 				struct ceph_msg_header *hdr, int *skip)
6404 {
6405 	struct ceph_msg *msg;
6406 	int type = (int) le16_to_cpu(hdr->type);
6407 	int front_len = (int) le32_to_cpu(hdr->front_len);
6408 
6409 	if (con->in_msg)
6410 		return con->in_msg;
6411 
6412 	*skip = 0;
6413 	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
6414 	if (!msg) {
6415 		pr_err("unable to allocate msg type %d len %d\n",
6416 		       type, front_len);
6417 		return NULL;
6418 	}
6419 
6420 	return msg;
6421 }
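
/*
 * Note: *skip is cleared before the allocation attempt, so a NULL
 * return from a failed ceph_msg_new() is reported to the messenger as
 * an allocation error rather than as a request to skip the incoming
 * message.
 */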
6422 
6423 static int mds_sign_message(struct ceph_msg *msg)
6424 {
6425 	struct ceph_mds_session *s = msg->con->private;
6426 	struct ceph_auth_handshake *auth = &s->s_auth;
6427 
6428 	return ceph_auth_sign_message(auth, msg);
6429 }
6430 
6431 static int mds_check_message_signature(struct ceph_msg *msg)
6432 {
6433 	struct ceph_mds_session *s = msg->con->private;
6434 	struct ceph_auth_handshake *auth = &s->s_auth;
6435 
6436 	return ceph_auth_check_message_signature(auth, msg);
6437 }
6438 
6439 static const struct ceph_connection_operations mds_con_ops = {
6440 	.get = mds_get_con,
6441 	.put = mds_put_con,
6442 	.alloc_msg = mds_alloc_msg,
6443 	.dispatch = mds_dispatch,
6444 	.peer_reset = mds_peer_reset,
6445 	.get_authorizer = mds_get_authorizer,
6446 	.add_authorizer_challenge = mds_add_authorizer_challenge,
6447 	.verify_authorizer_reply = mds_verify_authorizer_reply,
6448 	.invalidate_authorizer = mds_invalidate_authorizer,
6449 	.sign_message = mds_sign_message,
6450 	.check_message_signature = mds_check_message_signature,
6451 	.get_auth_request = mds_get_auth_request,
6452 	.handle_auth_reply_more = mds_handle_auth_reply_more,
6453 	.handle_auth_done = mds_handle_auth_done,
6454 	.handle_auth_bad_method = mds_handle_auth_bad_method,
6455 };
6456 
6457 /* eof */
6458