xref: /linux/fs/ceph/inode.c (revision 409c38d4f156740bf3165fd6ceae4fa6425eebf4)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/module.h>
5 #include <linux/fs.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
17 #include <linux/fscrypt.h>
18 
19 #include "super.h"
20 #include "mds_client.h"
21 #include "cache.h"
22 #include "crypto.h"
23 #include <linux/ceph/decode.h>
24 
25 /*
26  * Ceph inode operations
27  *
28  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
29  * setattr, etc.), xattr helpers, and helpers for assimilating
30  * metadata returned by the MDS into our cache.
31  *
32  * Also define helpers for doing asynchronous writeback, invalidation,
33  * and truncation for the benefit of those who can't afford to block
34  * (typically because they are in the message handler path).
35  */
36 
37 static const struct inode_operations ceph_symlink_iops;
38 static const struct inode_operations ceph_encrypted_symlink_iops;
39 
40 static void ceph_inode_work(struct work_struct *work);
41 
42 /*
43  * find or create an inode, given the ceph ino number
44  */
45 static int ceph_set_ino_cb(struct inode *inode, void *data)
46 {
47 	struct ceph_inode_info *ci = ceph_inode(inode);
48 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
49 
50 	ci->i_vino = *(struct ceph_vino *)data;
51 	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
52 	inode_set_iversion_raw(inode, 0);
53 	percpu_counter_inc(&mdsc->metric.total_inodes);
54 
55 	return 0;
56 }
57 
58 /**
59  * ceph_new_inode - allocate a new inode in advance of an expected create
60  * @dir: parent directory for new inode
61  * @dentry: dentry that may eventually point to new inode
62  * @mode: mode of new inode
63  * @as_ctx: pointer to inherited security context
64  *
65  * Allocate a new inode in advance of an operation to create a new inode.
66  * This allocates the inode and sets up the acl_sec_ctx with appropriate
67  * info for the new inode.
68  *
69  * Returns a pointer to the new inode or an ERR_PTR.
70  */
71 struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
72 			     umode_t *mode, struct ceph_acl_sec_ctx *as_ctx)
73 {
74 	int err;
75 	struct inode *inode;
76 
77 	inode = new_inode(dir->i_sb);
78 	if (!inode)
79 		return ERR_PTR(-ENOMEM);
80 
81 	if (!S_ISLNK(*mode)) {
82 		err = ceph_pre_init_acls(dir, mode, as_ctx);
83 		if (err < 0)
84 			goto out_err;
85 	}
86 
87 	inode->i_state = 0;
88 	inode->i_mode = *mode;
89 
90 	err = ceph_security_init_secctx(dentry, *mode, as_ctx);
91 	if (err < 0)
92 		goto out_err;
93 
94 	/*
95 	 * We'll skip setting the fscrypt context for snapshots, leaving
96 	 * that for handle_reply().
97 	 */
98 	if (ceph_snap(dir) != CEPH_SNAPDIR) {
99 		err = ceph_fscrypt_prepare_context(dir, inode, as_ctx);
100 		if (err)
101 			goto out_err;
102 	}
103 
104 	return inode;
105 out_err:
106 	iput(inode);
107 	return ERR_PTR(err);
108 }
109 
110 void ceph_as_ctx_to_req(struct ceph_mds_request *req,
111 			struct ceph_acl_sec_ctx *as_ctx)
112 {
113 	if (as_ctx->pagelist) {
114 		req->r_pagelist = as_ctx->pagelist;
115 		as_ctx->pagelist = NULL;
116 	}
117 	ceph_fscrypt_as_ctx_to_req(req, as_ctx);
118 }
119 
120 /**
121  * ceph_get_inode - find or create/hash a new inode
122  * @sb: superblock to search and allocate in
123  * @vino: vino to search for
124  * @newino: optional new inode to insert if one isn't found (may be NULL)
125  *
126  * Search for or insert a new inode into the hash for the given vino, and
127  * return a reference to it. If @newino is non-NULL, its reference is consumed.
128  */
129 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
130 			     struct inode *newino)
131 {
132 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
133 	struct ceph_client *cl = mdsc->fsc->client;
134 	struct inode *inode;
135 
136 	if (ceph_vino_is_reserved(vino))
137 		return ERR_PTR(-EREMOTEIO);
138 
139 	if (newino) {
140 		inode = inode_insert5(newino, (unsigned long)vino.ino,
141 				      ceph_ino_compare, ceph_set_ino_cb, &vino);
142 		if (inode != newino)
143 			iput(newino);
144 	} else {
145 		inode = iget5_locked(sb, (unsigned long)vino.ino,
146 				     ceph_ino_compare, ceph_set_ino_cb, &vino);
147 	}
148 
149 	if (!inode) {
150 		doutc(cl, "no inode found for %llx.%llx\n", vino.ino, vino.snap);
151 		return ERR_PTR(-ENOMEM);
152 	}
153 
154 	doutc(cl, "on %llx=%llx.%llx got %p new %d\n",
155 	      ceph_present_inode(inode), ceph_vinop(inode), inode,
156 	      !!(inode->i_state & I_NEW));
157 	return inode;
158 }
159 
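/*
 * Illustrative usage sketch (hypothetical caller, for documentation
 * only): look up an inode by vino and, if it came back I_NEW, fill it
 * before unlocking:
 *
 *	struct ceph_vino vino = { .ino = ino, .snap = CEPH_NOSNAP };
 *	struct inode *inode = ceph_get_inode(sb, vino, NULL);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	... fill via ceph_fill_inode() ...
 *	if (inode->i_state & I_NEW)
 *		unlock_new_inode(inode);
 *
 * Note that a preallocated @newino is always consumed: if an existing
 * inode is found instead, the new one is dropped with iput().
 */
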
160 /*
161  * get/construct the snapdir inode for a given directory
162  */
163 struct inode *ceph_get_snapdir(struct inode *parent)
164 {
165 	struct ceph_client *cl = ceph_inode_to_client(parent);
166 	struct ceph_vino vino = {
167 		.ino = ceph_ino(parent),
168 		.snap = CEPH_SNAPDIR,
169 	};
170 	struct inode *inode = ceph_get_inode(parent->i_sb, vino, NULL);
171 	struct ceph_inode_info *ci = ceph_inode(inode);
172 	int ret = -ENOTDIR;
173 
174 	if (IS_ERR(inode))
175 		return inode;
176 
177 	if (!S_ISDIR(parent->i_mode)) {
178 		pr_warn_once_client(cl, "bad snapdir parent type (mode=0%o)\n",
179 				    parent->i_mode);
180 		goto err;
181 	}
182 
183 	if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
184 		pr_warn_once_client(cl, "bad snapdir inode type (mode=0%o)\n",
185 				    inode->i_mode);
186 		goto err;
187 	}
188 
189 	inode->i_mode = parent->i_mode;
190 	inode->i_uid = parent->i_uid;
191 	inode->i_gid = parent->i_gid;
192 	inode_set_mtime_to_ts(inode, inode_get_mtime(parent));
193 	inode_set_ctime_to_ts(inode, inode_get_ctime(parent));
194 	inode_set_atime_to_ts(inode, inode_get_atime(parent));
195 	ci->i_rbytes = 0;
196 	ci->i_btime = ceph_inode(parent)->i_btime;
197 
198 #ifdef CONFIG_FS_ENCRYPTION
199 	/* if encrypted, just borrow fscrypt_auth from parent */
200 	if (IS_ENCRYPTED(parent)) {
201 		struct ceph_inode_info *pci = ceph_inode(parent);
202 
203 		ci->fscrypt_auth = kmemdup(pci->fscrypt_auth,
204 					   pci->fscrypt_auth_len,
205 					   GFP_KERNEL);
206 		if (ci->fscrypt_auth) {
207 			inode->i_flags |= S_ENCRYPTED;
208 			ci->fscrypt_auth_len = pci->fscrypt_auth_len;
209 		} else {
210 			doutc(cl, "Failed to alloc snapdir fscrypt_auth\n");
211 			ret = -ENOMEM;
212 			goto err;
213 		}
214 	}
215 #endif
216 	if (inode->i_state & I_NEW) {
217 		inode->i_op = &ceph_snapdir_iops;
218 		inode->i_fop = &ceph_snapdir_fops;
219 		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
220 		unlock_new_inode(inode);
221 	}
222 
223 	return inode;
224 err:
225 	if ((inode->i_state & I_NEW))
226 		discard_new_inode(inode);
227 	else
228 		iput(inode);
229 	return ERR_PTR(ret);
230 }
231 
232 const struct inode_operations ceph_file_iops = {
233 	.permission = ceph_permission,
234 	.setattr = ceph_setattr,
235 	.getattr = ceph_getattr,
236 	.listxattr = ceph_listxattr,
237 	.get_inode_acl = ceph_get_acl,
238 	.set_acl = ceph_set_acl,
239 };
240 
241 
242 /*
243  * We use a 'frag tree' to keep track of the MDS's directory fragments
244  * for a given inode (usually there is just a single fragment).  We
245  * need to know when a child frag is delegated to a new MDS, or when
246  * it is flagged as replicated, so we can direct our requests
247  * accordingly.
248  */
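
/*
 * Frag encoding, for reference (illustrative sketch; the definitive
 * helpers live in include/linux/ceph/ceph_frag.h): a frag packs the
 * number of significant bits into the top 8 bits of a u32 and the
 * left-justified 24-bit hash value into the low 24 bits, e.g.:
 *
 *	ceph_frag_make(0, 0)        == 0x00000000   root (whole range)
 *	ceph_frag_make(1, 0x000000) == 0x01000000   lower half
 *	ceph_frag_make(1, 0x800000) == 0x01800000   upper half
 *
 * A frag whose split_by is N has 2^N children, enumerated with
 * ceph_frag_make_child().
 */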
249 
250 /*
251  * find/create a frag in the tree
252  */
253 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
254 						    u32 f)
255 {
256 	struct inode *inode = &ci->netfs.inode;
257 	struct ceph_client *cl = ceph_inode_to_client(inode);
258 	struct rb_node **p;
259 	struct rb_node *parent = NULL;
260 	struct ceph_inode_frag *frag;
261 	int c;
262 
263 	p = &ci->i_fragtree.rb_node;
264 	while (*p) {
265 		parent = *p;
266 		frag = rb_entry(parent, struct ceph_inode_frag, node);
267 		c = ceph_frag_compare(f, frag->frag);
268 		if (c < 0)
269 			p = &(*p)->rb_left;
270 		else if (c > 0)
271 			p = &(*p)->rb_right;
272 		else
273 			return frag;
274 	}
275 
276 	frag = kmalloc(sizeof(*frag), GFP_NOFS);
277 	if (!frag)
278 		return ERR_PTR(-ENOMEM);
279 
280 	frag->frag = f;
281 	frag->split_by = 0;
282 	frag->mds = -1;
283 	frag->ndist = 0;
284 
285 	rb_link_node(&frag->node, parent, p);
286 	rb_insert_color(&frag->node, &ci->i_fragtree);
287 
288 	doutc(cl, "added %p %llx.%llx frag %x\n", inode, ceph_vinop(inode), f);
289 	return frag;
290 }
291 
292 /*
293  * find a specific frag @f
294  */
295 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
296 {
297 	struct rb_node *n = ci->i_fragtree.rb_node;
298 
299 	while (n) {
300 		struct ceph_inode_frag *frag =
301 			rb_entry(n, struct ceph_inode_frag, node);
302 		int c = ceph_frag_compare(f, frag->frag);
303 		if (c < 0)
304 			n = n->rb_left;
305 		else if (c > 0)
306 			n = n->rb_right;
307 		else
308 			return frag;
309 	}
310 	return NULL;
311 }
312 
313 /*
314  * Choose frag containing the given value @v.  If @pfrag is
315  * specified, copy the frag delegation info to the caller if
316  * it is present.
317  */
318 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
319 			      struct ceph_inode_frag *pfrag, int *found)
320 {
321 	struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
322 	u32 t = ceph_frag_make(0, 0);
323 	struct ceph_inode_frag *frag;
324 	unsigned nway, i;
325 	u32 n;
326 
327 	if (found)
328 		*found = 0;
329 
330 	while (1) {
331 		WARN_ON(!ceph_frag_contains_value(t, v));
332 		frag = __ceph_find_frag(ci, t);
333 		if (!frag)
334 			break; /* t is a leaf */
335 		if (frag->split_by == 0) {
336 			if (pfrag)
337 				memcpy(pfrag, frag, sizeof(*pfrag));
338 			if (found)
339 				*found = 1;
340 			break;
341 		}
342 
343 		/* choose child */
344 		nway = 1 << frag->split_by;
345 		doutc(cl, "frag(%x) %x splits by %d (%d ways)\n", v, t,
346 		      frag->split_by, nway);
347 		for (i = 0; i < nway; i++) {
348 			n = ceph_frag_make_child(t, frag->split_by, i);
349 			if (ceph_frag_contains_value(n, v)) {
350 				t = n;
351 				break;
352 			}
353 		}
354 		BUG_ON(i == nway);
355 	}
356 	doutc(cl, "frag(%x) = %x\n", v, t);
357 
358 	return t;
359 }
360 
361 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
362 		     struct ceph_inode_frag *pfrag, int *found)
363 {
364 	u32 ret;
365 	mutex_lock(&ci->i_fragtree_mutex);
366 	ret = __ceph_choose_frag(ci, v, pfrag, found);
367 	mutex_unlock(&ci->i_fragtree_mutex);
368 	return ret;
369 }
370 
371 /*
372  * Process dirfrag (delegation) info from the mds.  Include a leaf
373  * fragment in the tree ONLY if ndist > 0.  Otherwise, only
374  * branches/splits are included in i_fragtree.
375  */
376 static int ceph_fill_dirfrag(struct inode *inode,
377 			     struct ceph_mds_reply_dirfrag *dirinfo)
378 {
379 	struct ceph_inode_info *ci = ceph_inode(inode);
380 	struct ceph_client *cl = ceph_inode_to_client(inode);
381 	struct ceph_inode_frag *frag;
382 	u32 id = le32_to_cpu(dirinfo->frag);
383 	int mds = le32_to_cpu(dirinfo->auth);
384 	int ndist = le32_to_cpu(dirinfo->ndist);
385 	int diri_auth = -1;
386 	int i;
387 	int err = 0;
388 
389 	spin_lock(&ci->i_ceph_lock);
390 	if (ci->i_auth_cap)
391 		diri_auth = ci->i_auth_cap->mds;
392 	spin_unlock(&ci->i_ceph_lock);
393 
394 	if (mds == -1) /* CDIR_AUTH_PARENT */
395 		mds = diri_auth;
396 
397 	mutex_lock(&ci->i_fragtree_mutex);
398 	if (ndist == 0 && mds == diri_auth) {
399 		/* no delegation info needed. */
400 		frag = __ceph_find_frag(ci, id);
401 		if (!frag)
402 			goto out;
403 		if (frag->split_by == 0) {
404 			/* tree leaf, remove */
405 			doutc(cl, "removed %p %llx.%llx frag %x (no ref)\n",
406 			      inode, ceph_vinop(inode), id);
407 			rb_erase(&frag->node, &ci->i_fragtree);
408 			kfree(frag);
409 		} else {
410 			/* tree branch, keep and clear */
411 			doutc(cl, "cleared %p %llx.%llx frag %x referral\n",
412 			      inode, ceph_vinop(inode), id);
413 			frag->mds = -1;
414 			frag->ndist = 0;
415 		}
416 		goto out;
417 	}
418 
419 
420 	/* find/add this frag to store mds delegation info */
421 	frag = __get_or_create_frag(ci, id);
422 	if (IS_ERR(frag)) {
423 		/* this is not the end of the world; we can continue
424 		   with bad/inaccurate delegation info */
425 		pr_err_client(cl, "ENOMEM on mds ref %p %llx.%llx fg %x\n",
426 			      inode, ceph_vinop(inode),
427 			      le32_to_cpu(dirinfo->frag));
428 		err = -ENOMEM;
429 		goto out;
430 	}
431 
432 	frag->mds = mds;
433 	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
434 	for (i = 0; i < frag->ndist; i++)
435 		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
436 	doutc(cl, "%p %llx.%llx frag %x ndist=%d\n", inode,
437 	      ceph_vinop(inode), frag->frag, frag->ndist);
438 
439 out:
440 	mutex_unlock(&ci->i_fragtree_mutex);
441 	return err;
442 }
443 
444 static int frag_tree_split_cmp(const void *l, const void *r)
445 {
446 	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split *)l;
447 	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split *)r;
448 	return ceph_frag_compare(le32_to_cpu(ls->frag),
449 				 le32_to_cpu(rs->frag));
450 }
451 
452 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
453 {
454 	if (!frag)
455 		return f == ceph_frag_make(0, 0);
456 	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
457 		return false;
458 	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
459 }
460 
461 static int ceph_fill_fragtree(struct inode *inode,
462 			      struct ceph_frag_tree_head *fragtree,
463 			      struct ceph_mds_reply_dirfrag *dirinfo)
464 {
465 	struct ceph_client *cl = ceph_inode_to_client(inode);
466 	struct ceph_inode_info *ci = ceph_inode(inode);
467 	struct ceph_inode_frag *frag, *prev_frag = NULL;
468 	struct rb_node *rb_node;
469 	unsigned i, split_by, nsplits;
470 	u32 id;
471 	bool update = false;
472 
473 	mutex_lock(&ci->i_fragtree_mutex);
474 	nsplits = le32_to_cpu(fragtree->nsplits);
475 	if (nsplits != ci->i_fragtree_nsplits) {
476 		update = true;
477 	} else if (nsplits) {
478 		i = get_random_u32_below(nsplits);
479 		id = le32_to_cpu(fragtree->splits[i].frag);
480 		if (!__ceph_find_frag(ci, id))
481 			update = true;
482 	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
483 		rb_node = rb_first(&ci->i_fragtree);
484 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
485 		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
486 			update = true;
487 	}
488 	if (!update && dirinfo) {
489 		id = le32_to_cpu(dirinfo->frag);
490 		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
491 			update = true;
492 	}
493 	if (!update)
494 		goto out_unlock;
495 
496 	if (nsplits > 1) {
497 		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
498 		     frag_tree_split_cmp, NULL);
499 	}
500 
501 	doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
502 	rb_node = rb_first(&ci->i_fragtree);
503 	for (i = 0; i < nsplits; i++) {
504 		id = le32_to_cpu(fragtree->splits[i].frag);
505 		split_by = le32_to_cpu(fragtree->splits[i].by);
506 		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
507 			pr_err_client(cl, "%p %llx.%llx invalid split %d/%u, "
508 			       "frag %x split by %d\n", inode,
509 			       ceph_vinop(inode), i, nsplits, id, split_by);
510 			continue;
511 		}
512 		frag = NULL;
513 		while (rb_node) {
514 			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
515 			if (ceph_frag_compare(frag->frag, id) >= 0) {
516 				if (frag->frag != id)
517 					frag = NULL;
518 				else
519 					rb_node = rb_next(rb_node);
520 				break;
521 			}
522 			rb_node = rb_next(rb_node);
523 			/* delete stale split/leaf node */
524 			if (frag->split_by > 0 ||
525 			    !is_frag_child(frag->frag, prev_frag)) {
526 				rb_erase(&frag->node, &ci->i_fragtree);
527 				if (frag->split_by > 0)
528 					ci->i_fragtree_nsplits--;
529 				kfree(frag);
530 			}
531 			frag = NULL;
532 		}
533 		if (!frag) {
534 			frag = __get_or_create_frag(ci, id);
535 			if (IS_ERR(frag))
536 				continue;
537 		}
538 		if (frag->split_by == 0)
539 			ci->i_fragtree_nsplits++;
540 		frag->split_by = split_by;
541 		doutc(cl, " frag %x split by %d\n", frag->frag, frag->split_by);
542 		prev_frag = frag;
543 	}
544 	while (rb_node) {
545 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
546 		rb_node = rb_next(rb_node);
547 		/* delete stale split/leaf node */
548 		if (frag->split_by > 0 ||
549 		    !is_frag_child(frag->frag, prev_frag)) {
550 			rb_erase(&frag->node, &ci->i_fragtree);
551 			if (frag->split_by > 0)
552 				ci->i_fragtree_nsplits--;
553 			kfree(frag);
554 		}
555 	}
556 out_unlock:
557 	mutex_unlock(&ci->i_fragtree_mutex);
558 	return 0;
559 }
560 
561 /*
562  * initialize a newly allocated inode.
563  */
564 struct inode *ceph_alloc_inode(struct super_block *sb)
565 {
566 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
567 	struct ceph_inode_info *ci;
568 	int i;
569 
570 	ci = alloc_inode_sb(sb, ceph_inode_cachep, GFP_NOFS);
571 	if (!ci)
572 		return NULL;
573 
574 	doutc(fsc->client, "%p\n", &ci->netfs.inode);
575 
576 	/* Set parameters for the netfs library */
577 	netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
578 
579 	spin_lock_init(&ci->i_ceph_lock);
580 
581 	ci->i_version = 0;
582 	ci->i_inline_version = 0;
583 	ci->i_time_warp_seq = 0;
584 	ci->i_ceph_flags = 0;
585 	atomic64_set(&ci->i_ordered_count, 1);
586 	atomic64_set(&ci->i_release_count, 1);
587 	atomic64_set(&ci->i_complete_seq[0], 0);
588 	atomic64_set(&ci->i_complete_seq[1], 0);
589 	ci->i_symlink = NULL;
590 
591 	ci->i_max_bytes = 0;
592 	ci->i_max_files = 0;
593 
594 	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
595 	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
596 	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
597 
598 	ci->i_fragtree = RB_ROOT;
599 	mutex_init(&ci->i_fragtree_mutex);
600 
601 	ci->i_xattrs.blob = NULL;
602 	ci->i_xattrs.prealloc_blob = NULL;
603 	ci->i_xattrs.dirty = false;
604 	ci->i_xattrs.index = RB_ROOT;
605 	ci->i_xattrs.count = 0;
606 	ci->i_xattrs.names_size = 0;
607 	ci->i_xattrs.vals_size = 0;
608 	ci->i_xattrs.version = 0;
609 	ci->i_xattrs.index_version = 0;
610 
611 	ci->i_caps = RB_ROOT;
612 	ci->i_auth_cap = NULL;
613 	ci->i_dirty_caps = 0;
614 	ci->i_flushing_caps = 0;
615 	INIT_LIST_HEAD(&ci->i_dirty_item);
616 	INIT_LIST_HEAD(&ci->i_flushing_item);
617 	ci->i_prealloc_cap_flush = NULL;
618 	INIT_LIST_HEAD(&ci->i_cap_flush_list);
619 	init_waitqueue_head(&ci->i_cap_wq);
620 	ci->i_hold_caps_max = 0;
621 	INIT_LIST_HEAD(&ci->i_cap_delay_list);
622 	INIT_LIST_HEAD(&ci->i_cap_snaps);
623 	ci->i_head_snapc = NULL;
624 	ci->i_snap_caps = 0;
625 
626 	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
627 	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
628 		ci->i_nr_by_mode[i] = 0;
629 
630 	mutex_init(&ci->i_truncate_mutex);
631 	ci->i_truncate_seq = 0;
632 	ci->i_truncate_size = 0;
633 	ci->i_truncate_pending = 0;
634 	ci->i_truncate_pagecache_size = 0;
635 
636 	ci->i_max_size = 0;
637 	ci->i_reported_size = 0;
638 	ci->i_wanted_max_size = 0;
639 	ci->i_requested_max_size = 0;
640 
641 	ci->i_pin_ref = 0;
642 	ci->i_rd_ref = 0;
643 	ci->i_rdcache_ref = 0;
644 	ci->i_wr_ref = 0;
645 	ci->i_wb_ref = 0;
646 	ci->i_fx_ref = 0;
647 	ci->i_wrbuffer_ref = 0;
648 	ci->i_wrbuffer_ref_head = 0;
649 	atomic_set(&ci->i_filelock_ref, 0);
650 	atomic_set(&ci->i_shared_gen, 1);
651 	ci->i_rdcache_gen = 0;
652 	ci->i_rdcache_revoking = 0;
653 
654 	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
655 	INIT_LIST_HEAD(&ci->i_unsafe_iops);
656 	spin_lock_init(&ci->i_unsafe_lock);
657 
658 	ci->i_snap_realm = NULL;
659 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
660 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
661 
662 	INIT_WORK(&ci->i_work, ceph_inode_work);
663 	ci->i_work_mask = 0;
664 	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
665 #ifdef CONFIG_FS_ENCRYPTION
666 	ci->fscrypt_auth = NULL;
667 	ci->fscrypt_auth_len = 0;
668 #endif
669 	return &ci->netfs.inode;
670 }
671 
672 void ceph_free_inode(struct inode *inode)
673 {
674 	struct ceph_inode_info *ci = ceph_inode(inode);
675 
676 	kfree(ci->i_symlink);
677 #ifdef CONFIG_FS_ENCRYPTION
678 	kfree(ci->fscrypt_auth);
679 #endif
680 	fscrypt_free_inode(inode);
681 	kmem_cache_free(ceph_inode_cachep, ci);
682 }
683 
684 void ceph_evict_inode(struct inode *inode)
685 {
686 	struct ceph_inode_info *ci = ceph_inode(inode);
687 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
688 	struct ceph_client *cl = ceph_inode_to_client(inode);
689 	struct ceph_inode_frag *frag;
690 	struct rb_node *n;
691 
692 	doutc(cl, "%p ino %llx.%llx\n", inode, ceph_vinop(inode));
693 
694 	percpu_counter_dec(&mdsc->metric.total_inodes);
695 
696 	truncate_inode_pages_final(&inode->i_data);
697 	if (inode->i_state & I_PINNING_NETFS_WB)
698 		ceph_fscache_unuse_cookie(inode, true);
699 	clear_inode(inode);
700 
701 	ceph_fscache_unregister_inode_cookie(ci);
702 	fscrypt_put_encryption_info(inode);
703 
704 	__ceph_remove_caps(ci);
705 
706 	if (__ceph_has_quota(ci, QUOTA_GET_ANY))
707 		ceph_adjust_quota_realms_count(inode, false);
708 
709 	/*
710 	 * we may still have a snap_realm reference if there are stray
711 	 * caps in i_snap_caps.
712 	 */
713 	if (ci->i_snap_realm) {
714 		if (ceph_snap(inode) == CEPH_NOSNAP) {
715 			doutc(cl, " dropping residual ref to snap realm %p\n",
716 			      ci->i_snap_realm);
717 			ceph_change_snap_realm(inode, NULL);
718 		} else {
719 			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
720 			ci->i_snap_realm = NULL;
721 		}
722 	}
723 
724 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
725 		frag = rb_entry(n, struct ceph_inode_frag, node);
726 		rb_erase(n, &ci->i_fragtree);
727 		kfree(frag);
728 	}
729 	ci->i_fragtree_nsplits = 0;
730 
731 	__ceph_destroy_xattrs(ci);
732 	if (ci->i_xattrs.blob)
733 		ceph_buffer_put(ci->i_xattrs.blob);
734 	if (ci->i_xattrs.prealloc_blob)
735 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
736 
737 	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
738 	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
739 }
740 
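/* Round a byte count up to the 512-byte block units used by i_blocks. */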
741 static inline blkcnt_t calc_inode_blocks(u64 size)
742 {
743 	return (size + (1<<9) - 1) >> 9;
744 }
745 
746 /*
747  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
748  * careful because either the client or MDS may have more up to date
749  * info, depending on which capabilities are held, and whether
750  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
751  * and size are monotonically increasing, except when utimes() or
752  * truncate() increments the corresponding _seq values.)
753  */
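/*
 * Note: the _seq comparisons here rely on ceph_seq_cmp(), which is
 * effectively a signed 32-bit difference, so ordering is preserved
 * across u32 wraparound, e.g. (illustrative):
 *
 *	ceph_seq_cmp(2, 1)          > 0
 *	ceph_seq_cmp(0, 0xffffffff) > 0   (0 is "newer" after wrap)
 */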
754 int ceph_fill_file_size(struct inode *inode, int issued,
755 			u32 truncate_seq, u64 truncate_size, u64 size)
756 {
757 	struct ceph_client *cl = ceph_inode_to_client(inode);
758 	struct ceph_inode_info *ci = ceph_inode(inode);
759 	int queue_trunc = 0;
760 	loff_t isize = i_size_read(inode);
761 
762 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
763 	    (truncate_seq == ci->i_truncate_seq && size > isize)) {
764 		doutc(cl, "size %lld -> %llu\n", isize, size);
765 		if (size > 0 && S_ISDIR(inode->i_mode)) {
766 			pr_err_client(cl, "non-zero size for directory\n");
767 			size = 0;
768 		}
769 		i_size_write(inode, size);
770 		inode->i_blocks = calc_inode_blocks(size);
771 		/*
772 		 * If we're expanding, then we should be able to just update
773 		 * the existing cookie.
774 		 */
775 		if (size > isize)
776 			ceph_fscache_update(inode);
777 		ci->i_reported_size = size;
778 		if (truncate_seq != ci->i_truncate_seq) {
779 			doutc(cl, "truncate_seq %u -> %u\n",
780 			      ci->i_truncate_seq, truncate_seq);
781 			ci->i_truncate_seq = truncate_seq;
782 
783 			/* the MDS should have revoked these caps */
784 			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
785 					       CEPH_CAP_FILE_LAZYIO));
786 			/*
787 			 * If we hold relevant caps, or in the case where we're
788 			 * not the only client referencing this file and we
789 			 * don't hold those caps, then we need to check whether
790 			 * the file is either opened or mmaped
791 			 */
792 			if ((issued & (CEPH_CAP_FILE_CACHE|
793 				       CEPH_CAP_FILE_BUFFER)) ||
794 			    mapping_mapped(inode->i_mapping) ||
795 			    __ceph_is_file_opened(ci)) {
796 				ci->i_truncate_pending++;
797 				queue_trunc = 1;
798 			}
799 		}
800 	}
801 
802 	/*
803 	 * It's possible that the new sizes of two consecutive
804 	 * truncations land in the same fscrypt last block; even
805 	 * then we still need to truncate the corresponding page
806 	 * cache.
807 	 */
808 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0) {
809 		doutc(cl, "truncate_size %lld -> %llu, encrypted %d\n",
810 		      ci->i_truncate_size, truncate_size,
811 		      !!IS_ENCRYPTED(inode));
812 
813 		ci->i_truncate_size = truncate_size;
814 
815 		if (IS_ENCRYPTED(inode)) {
816 			doutc(cl, "truncate_pagecache_size %lld -> %llu\n",
817 			      ci->i_truncate_pagecache_size, size);
818 			ci->i_truncate_pagecache_size = size;
819 		} else {
820 			ci->i_truncate_pagecache_size = truncate_size;
821 		}
822 	}
823 	return queue_trunc;
824 }
825 
826 void ceph_fill_file_time(struct inode *inode, int issued,
827 			 u64 time_warp_seq, struct timespec64 *ctime,
828 			 struct timespec64 *mtime, struct timespec64 *atime)
829 {
830 	struct ceph_client *cl = ceph_inode_to_client(inode);
831 	struct ceph_inode_info *ci = ceph_inode(inode);
832 	struct timespec64 ictime = inode_get_ctime(inode);
833 	int warn = 0;
834 
835 	if (issued & (CEPH_CAP_FILE_EXCL|
836 		      CEPH_CAP_FILE_WR|
837 		      CEPH_CAP_FILE_BUFFER|
838 		      CEPH_CAP_AUTH_EXCL|
839 		      CEPH_CAP_XATTR_EXCL)) {
840 		if (ci->i_version == 0 ||
841 		    timespec64_compare(ctime, &ictime) > 0) {
842 			doutc(cl, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
843 			     ictime.tv_sec, ictime.tv_nsec,
844 			     ctime->tv_sec, ctime->tv_nsec);
845 			inode_set_ctime_to_ts(inode, *ctime);
846 		}
847 		if (ci->i_version == 0 ||
848 		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
849 			/* the MDS did a utimes() */
850 			doutc(cl, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n",
851 			     inode_get_mtime_sec(inode),
852 			     inode_get_mtime_nsec(inode),
853 			     mtime->tv_sec, mtime->tv_nsec,
854 			     ci->i_time_warp_seq, (int)time_warp_seq);
855 
856 			inode_set_mtime_to_ts(inode, *mtime);
857 			inode_set_atime_to_ts(inode, *atime);
858 			ci->i_time_warp_seq = time_warp_seq;
859 		} else if (time_warp_seq == ci->i_time_warp_seq) {
860 			struct timespec64	ts;
861 
862 			/* nobody did utimes(); take the max */
863 			ts = inode_get_mtime(inode);
864 			if (timespec64_compare(mtime, &ts) > 0) {
865 				doutc(cl, "mtime %lld.%09ld -> %lld.%09ld inc\n",
866 				     ts.tv_sec, ts.tv_nsec,
867 				     mtime->tv_sec, mtime->tv_nsec);
868 				inode_set_mtime_to_ts(inode, *mtime);
869 			}
870 			ts = inode_get_atime(inode);
871 			if (timespec64_compare(atime, &ts) > 0) {
872 				doutc(cl, "atime %lld.%09ld -> %lld.%09ld inc\n",
873 				     ts.tv_sec, ts.tv_nsec,
874 				     atime->tv_sec, atime->tv_nsec);
875 				inode_set_atime_to_ts(inode, *atime);
876 			}
877 		} else if (issued & CEPH_CAP_FILE_EXCL) {
878 			/* we did a utimes(); ignore mds values */
879 		} else {
880 			warn = 1;
881 		}
882 	} else {
883 		/* we have no write|excl caps; whatever the MDS says is true */
884 		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
885 			inode_set_ctime_to_ts(inode, *ctime);
886 			inode_set_mtime_to_ts(inode, *mtime);
887 			inode_set_atime_to_ts(inode, *atime);
888 			ci->i_time_warp_seq = time_warp_seq;
889 		} else {
890 			warn = 1;
891 		}
892 	}
893 	if (warn) /* time_warp_seq shouldn't go backwards */
894 		doutc(cl, "%p mds time_warp_seq %llu < %u\n", inode,
895 		      time_warp_seq, ci->i_time_warp_seq);
896 }
897 
898 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
899 static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
900 				    const char *encsym,
901 				    int enclen, u8 **decsym)
902 {
903 	struct ceph_client *cl = mdsc->fsc->client;
904 	int declen;
905 	u8 *sym;
906 
907 	sym = kmalloc(enclen + 1, GFP_NOFS);
908 	if (!sym)
909 		return -ENOMEM;
910 
911 	declen = ceph_base64_decode(encsym, enclen, sym);
912 	if (declen < 0) {
913 		pr_err_client(cl,
914 			"can't decode symlink (%d). Content: %.*s\n",
915 			declen, enclen, encsym);
916 		kfree(sym);
917 		return -EIO;
918 	}
919 	sym[declen] = '\0';	/* NUL-terminate at the decoded length */
920 	*decsym = sym;
921 	return declen;
922 }
923 #else
924 static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
925 				    const char *encsym,
926 				    int symlen, u8 **decsym)
927 {
928 	return -EOPNOTSUPP;
929 }
930 #endif
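
/*
 * Example (illustrative values only): an encrypted symlink target
 * stored as the base64url text "aGVsbG8" decodes to the five raw
 * bytes "hello"; the decoded length is returned and *decsym points
 * to a NUL-terminated copy of the decoded target.
 */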
931 
932 /*
933  * Populate an inode based on info from mds.  May be called on new or
934  * existing inodes.
935  */
936 int ceph_fill_inode(struct inode *inode, struct page *locked_page,
937 		    struct ceph_mds_reply_info_in *iinfo,
938 		    struct ceph_mds_reply_dirfrag *dirinfo,
939 		    struct ceph_mds_session *session, int cap_fmode,
940 		    struct ceph_cap_reservation *caps_reservation)
941 {
942 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
943 	struct ceph_client *cl = mdsc->fsc->client;
944 	struct ceph_mds_reply_inode *info = iinfo->in;
945 	struct ceph_inode_info *ci = ceph_inode(inode);
946 	int issued, new_issued, info_caps;
947 	struct timespec64 mtime, atime, ctime;
948 	struct ceph_buffer *xattr_blob = NULL;
949 	struct ceph_buffer *old_blob = NULL;
950 	struct ceph_string *pool_ns = NULL;
951 	struct ceph_cap *new_cap = NULL;
952 	int err = 0;
953 	bool wake = false;
954 	bool queue_trunc = false;
955 	bool new_version = false;
956 	bool fill_inline = false;
957 	umode_t mode = le32_to_cpu(info->mode);
958 	dev_t rdev = le32_to_cpu(info->rdev);
959 
960 	lockdep_assert_held(&mdsc->snap_rwsem);
961 
962 	doutc(cl, "%p ino %llx.%llx v %llu had %llu\n", inode, ceph_vinop(inode),
963 	      le64_to_cpu(info->version), ci->i_version);
964 
965 	/* Once I_NEW is cleared, we can't change type or dev numbers */
966 	if (inode->i_state & I_NEW) {
967 		inode->i_mode = mode;
968 	} else {
969 		if (inode_wrong_type(inode, mode)) {
970 			pr_warn_once_client(cl,
971 				"inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
972 				ceph_vinop(inode), inode->i_mode, mode);
973 			return -ESTALE;
974 		}
975 
976 		if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
977 			pr_warn_once_client(cl,
978 				"dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
979 				ceph_vinop(inode), MAJOR(inode->i_rdev),
980 				MINOR(inode->i_rdev), MAJOR(rdev),
981 				MINOR(rdev));
982 			return -ESTALE;
983 		}
984 	}
985 
986 	info_caps = le32_to_cpu(info->cap.caps);
987 
988 	/* prealloc new cap struct */
989 	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
990 		new_cap = ceph_get_cap(mdsc, caps_reservation);
991 		if (!new_cap)
992 			return -ENOMEM;
993 	}
994 
995 	/*
996 	 * prealloc xattr data, if it looks like we'll need it.  only
997 	 * if len > 4 (meaning there are actually xattrs; the first 4
998 	 * bytes are the xattr count).
999 	 */
1000 	if (iinfo->xattr_len > 4) {
1001 		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
1002 		if (!xattr_blob)
1003 			pr_err_client(cl, "ENOMEM xattr blob %d bytes\n",
1004 				      iinfo->xattr_len);
1005 	}
1006 
1007 	if (iinfo->pool_ns_len > 0)
1008 		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
1009 						     iinfo->pool_ns_len);
1010 
1011 	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
1012 		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
1013 
1014 	spin_lock(&ci->i_ceph_lock);
1015 
1016 	/*
1017 	 * the provided version is odd if the inode value is projected and
1018 	 * even otherwise (stable).  skip the update if we have newer stable
1019 	 * info (ours>=theirs, e.g. due to racing mds replies), unless
1020 	 * we are getting projected (unstable) info (in which case the
1021 	 * version is odd, and we want ours>theirs).
1022 	 *   us   them
1023 	 *   2    2     skip
1024 	 *   3    2     skip
1025 	 *   3    3     update
1026 	 */
1027 	if (ci->i_version == 0 ||
1028 	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
1029 	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
1030 		new_version = true;
1031 
1032 	/* Update change_attribute */
1033 	inode_set_max_iversion_raw(inode, iinfo->change_attr);
1034 
1035 	__ceph_caps_issued(ci, &issued);
1036 	issued |= __ceph_caps_dirty(ci);
1037 	new_issued = ~issued & info_caps;
1038 
1039 	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
1040 
1041 #ifdef CONFIG_FS_ENCRYPTION
1042 	if (iinfo->fscrypt_auth_len &&
1043 	    ((inode->i_state & I_NEW) || (ci->fscrypt_auth_len == 0))) {
1044 		kfree(ci->fscrypt_auth);
1045 		ci->fscrypt_auth_len = iinfo->fscrypt_auth_len;
1046 		ci->fscrypt_auth = iinfo->fscrypt_auth;
1047 		iinfo->fscrypt_auth = NULL;
1048 		iinfo->fscrypt_auth_len = 0;
1049 		inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
1050 	}
1051 #endif
1052 
1053 	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
1054 	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
1055 		inode->i_mode = mode;
1056 		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
1057 		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
1058 		doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode,
1059 		      ceph_vinop(inode), inode->i_mode,
1060 		      from_kuid(&init_user_ns, inode->i_uid),
1061 		      from_kgid(&init_user_ns, inode->i_gid));
1062 		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
1063 		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
1064 	}
1065 
1066 	/* directories have fl_stripe_unit set to zero */
1067 	if (IS_ENCRYPTED(inode))
1068 		inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
1069 	else if (le32_to_cpu(info->layout.fl_stripe_unit))
1070 		inode->i_blkbits =
1071 			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
1072 	else
1073 		inode->i_blkbits = CEPH_BLOCK_SHIFT;
1074 
1075 	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
1076 	    (issued & CEPH_CAP_LINK_EXCL) == 0)
1077 		set_nlink(inode, le32_to_cpu(info->nlink));
1078 
1079 	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
1080 		/* be careful with mtime, atime, size */
1081 		ceph_decode_timespec64(&atime, &info->atime);
1082 		ceph_decode_timespec64(&mtime, &info->mtime);
1083 		ceph_decode_timespec64(&ctime, &info->ctime);
1084 		ceph_fill_file_time(inode, issued,
1085 				le32_to_cpu(info->time_warp_seq),
1086 				&ctime, &mtime, &atime);
1087 	}
1088 
1089 	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
1090 		ci->i_files = le64_to_cpu(info->files);
1091 		ci->i_subdirs = le64_to_cpu(info->subdirs);
1092 	}
1093 
1094 	if (new_version ||
1095 	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
1096 		u64 size = le64_to_cpu(info->size);
1097 		s64 old_pool = ci->i_layout.pool_id;
1098 		struct ceph_string *old_ns;
1099 
1100 		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
1101 		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
1102 					lockdep_is_held(&ci->i_ceph_lock));
1103 		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
1104 
1105 		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
1106 			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
1107 
1108 		pool_ns = old_ns;
1109 
1110 		if (IS_ENCRYPTED(inode) && size &&
1111 		    iinfo->fscrypt_file_len == sizeof(__le64)) {
1112 			u64 fsize = __le64_to_cpu(*(__le64 *)iinfo->fscrypt_file);
1113 
1114 			if (size == round_up(fsize, CEPH_FSCRYPT_BLOCK_SIZE)) {
1115 				size = fsize;
1116 			} else {
1117 				pr_warn_client(cl,
1118 					"fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
1119 					size, fsize);
1120 			}
1121 		}
1122 
1123 		queue_trunc = ceph_fill_file_size(inode, issued,
1124 					le32_to_cpu(info->truncate_seq),
1125 					le64_to_cpu(info->truncate_size),
1126 					size);
1127 		/* only update max_size on auth cap */
1128 		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
1129 		    ci->i_max_size != le64_to_cpu(info->max_size)) {
1130 			doutc(cl, "max_size %lld -> %llu\n",
1131 			    ci->i_max_size, le64_to_cpu(info->max_size));
1132 			ci->i_max_size = le64_to_cpu(info->max_size);
1133 		}
1134 	}
1135 
1136 	/* layout and rstat are not tracked by capability; update them if
1137 	 * the inode info is from the auth mds */
1138 	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
1139 		if (S_ISDIR(inode->i_mode)) {
1140 			ci->i_dir_layout = iinfo->dir_layout;
1141 			ci->i_rbytes = le64_to_cpu(info->rbytes);
1142 			ci->i_rfiles = le64_to_cpu(info->rfiles);
1143 			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
1144 			ci->i_dir_pin = iinfo->dir_pin;
1145 			ci->i_rsnaps = iinfo->rsnaps;
1146 			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
1147 		}
1148 	}
1149 
1150 	/* xattrs */
1151 	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
1152 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
1153 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
1154 		if (ci->i_xattrs.blob)
1155 			old_blob = ci->i_xattrs.blob;
1156 		ci->i_xattrs.blob = xattr_blob;
1157 		if (xattr_blob)
1158 			memcpy(ci->i_xattrs.blob->vec.iov_base,
1159 			       iinfo->xattr_data, iinfo->xattr_len);
1160 		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
1161 		ceph_forget_all_cached_acls(inode);
1162 		ceph_security_invalidate_secctx(inode);
1163 		xattr_blob = NULL;
1164 	}
1165 
1166 	/* finally update i_version */
1167 	if (le64_to_cpu(info->version) > ci->i_version)
1168 		ci->i_version = le64_to_cpu(info->version);
1169 
1170 	inode->i_mapping->a_ops = &ceph_aops;
1171 
1172 	switch (inode->i_mode & S_IFMT) {
1173 	case S_IFIFO:
1174 	case S_IFBLK:
1175 	case S_IFCHR:
1176 	case S_IFSOCK:
1177 		inode->i_blkbits = PAGE_SHIFT;
1178 		init_special_inode(inode, inode->i_mode, rdev);
1179 		inode->i_op = &ceph_file_iops;
1180 		break;
1181 	case S_IFREG:
1182 		inode->i_op = &ceph_file_iops;
1183 		inode->i_fop = &ceph_file_fops;
1184 		break;
1185 	case S_IFLNK:
1186 		if (!ci->i_symlink) {
1187 			u32 symlen = iinfo->symlink_len;
1188 			char *sym;
1189 
1190 			spin_unlock(&ci->i_ceph_lock);
1191 
1192 			if (IS_ENCRYPTED(inode)) {
1193 				if (symlen != i_size_read(inode))
1194 					pr_err_client(cl,
1195 						"%p %llx.%llx BAD symlink size %lld\n",
1196 						inode, ceph_vinop(inode),
1197 						i_size_read(inode));
1198 
1199 				err = decode_encrypted_symlink(mdsc, iinfo->symlink,
1200 							       symlen, (u8 **)&sym);
1201 				if (err < 0) {
1202 					pr_err_client(cl,
1203 						"decoding encrypted symlink failed: %d\n",
1204 						err);
1205 					goto out;
1206 				}
1207 				symlen = err;
1208 				i_size_write(inode, symlen);
1209 				inode->i_blocks = calc_inode_blocks(symlen);
1210 			} else {
1211 				if (symlen != i_size_read(inode)) {
1212 					pr_err_client(cl,
1213 						"%p %llx.%llx BAD symlink size %lld\n",
1214 						inode, ceph_vinop(inode),
1215 						i_size_read(inode));
1216 					i_size_write(inode, symlen);
1217 					inode->i_blocks = calc_inode_blocks(symlen);
1218 				}
1219 
1220 				err = -ENOMEM;
1221 				sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
1222 				if (!sym)
1223 					goto out;
1224 			}
1225 
1226 			spin_lock(&ci->i_ceph_lock);
1227 			if (!ci->i_symlink)
1228 				ci->i_symlink = sym;
1229 			else
1230 				kfree(sym); /* lost a race */
1231 		}
1232 
1233 		if (IS_ENCRYPTED(inode)) {
1234 			/*
1235 			 * Encrypted symlinks need to be decrypted before we can
1236 			 * cache their targets in i_link. Don't touch it here.
1237 			 */
1238 			inode->i_op = &ceph_encrypted_symlink_iops;
1239 		} else {
1240 			inode->i_link = ci->i_symlink;
1241 			inode->i_op = &ceph_symlink_iops;
1242 		}
1243 		break;
1244 	case S_IFDIR:
1245 		inode->i_op = &ceph_dir_iops;
1246 		inode->i_fop = &ceph_dir_fops;
1247 		break;
1248 	default:
1249 		pr_err_client(cl, "%p %llx.%llx BAD mode 0%o\n", inode,
1250 			      ceph_vinop(inode), inode->i_mode);
1251 	}
1252 
1253 	/* were we issued a capability? */
1254 	if (info_caps) {
1255 		if (ceph_snap(inode) == CEPH_NOSNAP) {
1256 			ceph_add_cap(inode, session,
1257 				     le64_to_cpu(info->cap.cap_id),
1258 				     info_caps,
1259 				     le32_to_cpu(info->cap.wanted),
1260 				     le32_to_cpu(info->cap.seq),
1261 				     le32_to_cpu(info->cap.mseq),
1262 				     le64_to_cpu(info->cap.realm),
1263 				     info->cap.flags, &new_cap);
1264 
1265 			/* set dir completion flag? */
1266 			if (S_ISDIR(inode->i_mode) &&
1267 			    ci->i_files == 0 && ci->i_subdirs == 0 &&
1268 			    (info_caps & CEPH_CAP_FILE_SHARED) &&
1269 			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
1270 			    !__ceph_dir_is_complete(ci)) {
1271 				doutc(cl, " marking %p complete (empty)\n",
1272 				      inode);
1273 				i_size_write(inode, 0);
1274 				__ceph_dir_set_complete(ci,
1275 					atomic64_read(&ci->i_release_count),
1276 					atomic64_read(&ci->i_ordered_count));
1277 			}
1278 
1279 			wake = true;
1280 		} else {
1281 			doutc(cl, " %p got snap_caps %s\n", inode,
1282 			      ceph_cap_string(info_caps));
1283 			ci->i_snap_caps |= info_caps;
1284 		}
1285 	}
1286 
1287 	if (iinfo->inline_version > 0 &&
1288 	    iinfo->inline_version >= ci->i_inline_version) {
1289 		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1290 		ci->i_inline_version = iinfo->inline_version;
1291 		if (ceph_has_inline_data(ci) &&
1292 		    (locked_page || (info_caps & cache_caps)))
1293 			fill_inline = true;
1294 	}
1295 
1296 	if (cap_fmode >= 0) {
1297 		if (!info_caps)
1298 			pr_warn_client(cl, "mds issued no caps on %llx.%llx\n",
1299 				       ceph_vinop(inode));
1300 		__ceph_touch_fmode(ci, mdsc, cap_fmode);
1301 	}
1302 
1303 	spin_unlock(&ci->i_ceph_lock);
1304 
1305 	ceph_fscache_register_inode_cookie(inode);
1306 
1307 	if (fill_inline)
1308 		ceph_fill_inline_data(inode, locked_page,
1309 				      iinfo->inline_data, iinfo->inline_len);
1310 
1311 	if (wake)
1312 		wake_up_all(&ci->i_cap_wq);
1313 
1314 	/* queue truncate if we saw i_size decrease */
1315 	if (queue_trunc)
1316 		ceph_queue_vmtruncate(inode);
1317 
1318 	/* populate frag tree */
1319 	if (S_ISDIR(inode->i_mode))
1320 		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
1321 
1322 	/* update delegation info? */
1323 	if (dirinfo)
1324 		ceph_fill_dirfrag(inode, dirinfo);
1325 
1326 	err = 0;
1327 out:
1328 	if (new_cap)
1329 		ceph_put_cap(mdsc, new_cap);
1330 	ceph_buffer_put(old_blob);
1331 	ceph_buffer_put(xattr_blob);
1332 	ceph_put_string(pool_ns);
1333 	return err;
1334 }
1335 
1336 /*
1337  * caller should hold session s_mutex and dentry->d_lock.
1338  */
1339 static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
1340 				  struct ceph_mds_reply_lease *lease,
1341 				  struct ceph_mds_session *session,
1342 				  unsigned long from_time,
1343 				  struct ceph_mds_session **old_lease_session)
1344 {
1345 	struct ceph_client *cl = ceph_inode_to_client(dir);
1346 	struct ceph_dentry_info *di = ceph_dentry(dentry);
1347 	unsigned mask = le16_to_cpu(lease->mask);
1348 	unsigned long duration = le32_to_cpu(lease->duration_ms);
1349 	unsigned long ttl = from_time + (duration * HZ) / 1000;
1350 	unsigned long half_ttl = from_time + (duration * HZ / 2) / 1000;
1351 
1352 	doutc(cl, "%p duration %lu ms ttl %lu\n", dentry, duration, ttl);
1353 
1354 	/* only track leases on regular dentries */
1355 	if (ceph_snap(dir) != CEPH_NOSNAP)
1356 		return;
1357 
1358 	if (mask & CEPH_LEASE_PRIMARY_LINK)
1359 		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
1360 	else
1361 		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1362 
1363 	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
1364 	if (!(mask & CEPH_LEASE_VALID)) {
1365 		__ceph_dentry_dir_lease_touch(di);
1366 		return;
1367 	}
1368 
1369 	if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
1370 	    time_before(ttl, di->time))
1371 		return;  /* we already have a newer lease. */
1372 
1373 	if (di->lease_session && di->lease_session != session) {
1374 		*old_lease_session = di->lease_session;
1375 		di->lease_session = NULL;
1376 	}
1377 
1378 	if (!di->lease_session)
1379 		di->lease_session = ceph_get_mds_session(session);
1380 	di->lease_gen = atomic_read(&session->s_cap_gen);
1381 	di->lease_seq = le32_to_cpu(lease->seq);
1382 	di->lease_renew_after = half_ttl;
1383 	di->lease_renew_from = 0;
1384 	di->time = ttl;
1385 
1386 	__ceph_dentry_lease_touch(di);
1387 }
1388 
1389 static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
1390 					struct ceph_mds_reply_lease *lease,
1391 					struct ceph_mds_session *session,
1392 					unsigned long from_time)
1393 {
1394 	struct ceph_mds_session *old_lease_session = NULL;
1395 	spin_lock(&dentry->d_lock);
1396 	__update_dentry_lease(dir, dentry, lease, session, from_time,
1397 			      &old_lease_session);
1398 	spin_unlock(&dentry->d_lock);
1399 	ceph_put_mds_session(old_lease_session);
1400 }
1401 
1402 /*
1403  * update dentry lease without having parent inode locked
1404  */
1405 static void update_dentry_lease_careful(struct dentry *dentry,
1406 					struct ceph_mds_reply_lease *lease,
1407 					struct ceph_mds_session *session,
1408 					unsigned long from_time,
1409 					char *dname, u32 dname_len,
1410 					struct ceph_vino *pdvino,
1411 					struct ceph_vino *ptvino)
1412 
1413 {
1414 	struct inode *dir;
1415 	struct ceph_mds_session *old_lease_session = NULL;
1416 
1417 	spin_lock(&dentry->d_lock);
1418 	/* make sure dentry's name matches target */
1419 	if (dentry->d_name.len != dname_len ||
1420 	    memcmp(dentry->d_name.name, dname, dname_len))
1421 		goto out_unlock;
1422 
1423 	dir = d_inode(dentry->d_parent);
1424 	/* make sure parent matches dvino */
1425 	if (!ceph_ino_compare(dir, pdvino))
1426 		goto out_unlock;
1427 
1428 	/* make sure dentry's inode matches target. NULL ptvino means that
1429 	 * we expect a negative dentry */
1430 	if (ptvino) {
1431 		if (d_really_is_negative(dentry))
1432 			goto out_unlock;
1433 		if (!ceph_ino_compare(d_inode(dentry), ptvino))
1434 			goto out_unlock;
1435 	} else {
1436 		if (d_really_is_positive(dentry))
1437 			goto out_unlock;
1438 	}
1439 
1440 	__update_dentry_lease(dir, dentry, lease, session,
1441 			      from_time, &old_lease_session);
1442 out_unlock:
1443 	spin_unlock(&dentry->d_lock);
1444 	ceph_put_mds_session(old_lease_session);
1445 }
1446 
1447 /*
1448  * splice a dentry to an inode.
1449  * caller must hold directory i_rwsem for this to be safe.
1450  */
1451 static int splice_dentry(struct dentry **pdn, struct inode *in)
1452 {
1453 	struct ceph_client *cl = ceph_inode_to_client(in);
1454 	struct dentry *dn = *pdn;
1455 	struct dentry *realdn;
1456 
1457 	BUG_ON(d_inode(dn));
1458 
1459 	if (S_ISDIR(in->i_mode)) {
1460 		/* If the inode is a directory, d_splice_alias() below removes
1461 		 * 'realdn' from its original parent. Ensure that the original
1462 		 * parent's readdir cache will not reference 'realdn'.
1463 		 */
1464 		realdn = d_find_any_alias(in);
1465 		if (realdn) {
1466 			struct ceph_dentry_info *di = ceph_dentry(realdn);
1467 			spin_lock(&realdn->d_lock);
1468 
1469 			realdn->d_op->d_prune(realdn);
1470 
1471 			di->time = jiffies;
1472 			di->lease_shared_gen = 0;
1473 			di->offset = 0;
1474 
1475 			spin_unlock(&realdn->d_lock);
1476 			dput(realdn);
1477 		}
1478 	}
1479 
1480 	/* dn must be unhashed */
1481 	if (!d_unhashed(dn))
1482 		d_drop(dn);
1483 	realdn = d_splice_alias(in, dn);
1484 	if (IS_ERR(realdn)) {
1485 		pr_err_client(cl, "error %ld %p inode %p ino %llx.%llx\n",
1486 			      PTR_ERR(realdn), dn, in, ceph_vinop(in));
1487 		return PTR_ERR(realdn);
1488 	}
1489 
1490 	if (realdn) {
1491 		doutc(cl, "dn %p (%d) spliced with %p (%d) inode %p ino %llx.%llx\n",
1492 		      dn, d_count(dn), realdn, d_count(realdn),
1493 		      d_inode(realdn), ceph_vinop(d_inode(realdn)));
1494 		dput(dn);
1495 		*pdn = realdn;
1496 	} else {
1497 		BUG_ON(!ceph_dentry(dn));
1498 		doutc(cl, "dn %p attached to %p ino %llx.%llx\n", dn,
1499 		      d_inode(dn), ceph_vinop(d_inode(dn)));
1500 	}
1501 	return 0;
1502 }
1503 
1504 /*
1505  * Incorporate results into the local cache.  This is either just
1506  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1507  * after a lookup).
1508  *
1509  * A reply may contain
1510  *         a directory inode along with a dentry.
1511  *  and/or a target inode
1512  *
1513  * Called with snap_rwsem (read).
1514  */
1515 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
1516 {
1517 	struct ceph_mds_session *session = req->r_session;
1518 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1519 	struct inode *in = NULL;
1520 	struct ceph_vino tvino, dvino;
1521 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
1522 	struct ceph_client *cl = fsc->client;
1523 	int err = 0;
1524 
1525 	doutc(cl, "%p is_dentry %d is_target %d\n", req,
1526 	      rinfo->head->is_dentry, rinfo->head->is_target);
1527 
1528 	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1529 		doutc(cl, "reply is empty!\n");
1530 		if (rinfo->head->result == 0 && req->r_parent)
1531 			ceph_invalidate_dir_request(req);
1532 		return 0;
1533 	}
1534 
1535 	if (rinfo->head->is_dentry) {
1536 		struct inode *dir = req->r_parent;
1537 
1538 		if (dir) {
1539 			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
1540 					      rinfo->dirfrag, session, -1,
1541 					      &req->r_caps_reservation);
1542 			if (err < 0)
1543 				goto done;
1544 		} else {
1545 			WARN_ON_ONCE(1);
1546 		}
1547 
1548 		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
1549 		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1550 		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1551 			bool is_nokey = false;
1552 			struct qstr dname;
1553 			struct dentry *dn, *parent;
1554 			struct fscrypt_str oname = FSTR_INIT(NULL, 0);
1555 			struct ceph_fname fname = { .dir	= dir,
1556 						    .name	= rinfo->dname,
1557 						    .ctext	= rinfo->altname,
1558 						    .name_len	= rinfo->dname_len,
1559 						    .ctext_len	= rinfo->altname_len };
1560 
1561 			BUG_ON(!rinfo->head->is_target);
1562 			BUG_ON(req->r_dentry);
1563 
1564 			parent = d_find_any_alias(dir);
1565 			BUG_ON(!parent);
1566 
1567 			err = ceph_fname_alloc_buffer(dir, &oname);
1568 			if (err < 0) {
1569 				dput(parent);
1570 				goto done;
1571 			}
1572 
1573 			err = ceph_fname_to_usr(&fname, NULL, &oname, &is_nokey);
1574 			if (err < 0) {
1575 				dput(parent);
1576 				ceph_fname_free_buffer(dir, &oname);
1577 				goto done;
1578 			}
1579 			dname.name = oname.name;
1580 			dname.len = oname.len;
1581 			dname.hash = full_name_hash(parent, dname.name, dname.len);
1582 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1583 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1584 retry_lookup:
1585 			dn = d_lookup(parent, &dname);
1586 			doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
1587 			      parent, dname.len, dname.name, dn);
1588 
1589 			if (!dn) {
1590 				dn = d_alloc(parent, &dname);
1591 				doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
1592 				      dname.len, dname.name, dn);
1593 				if (!dn) {
1594 					dput(parent);
1595 					ceph_fname_free_buffer(dir, &oname);
1596 					err = -ENOMEM;
1597 					goto done;
1598 				}
1599 				if (is_nokey) {
1600 					spin_lock(&dn->d_lock);
1601 					dn->d_flags |= DCACHE_NOKEY_NAME;
1602 					spin_unlock(&dn->d_lock);
1603 				}
1604 				err = 0;
1605 			} else if (d_really_is_positive(dn) &&
1606 				   (ceph_ino(d_inode(dn)) != tvino.ino ||
1607 				    ceph_snap(d_inode(dn)) != tvino.snap)) {
1608 				doutc(cl, " dn %p points to wrong inode %p\n",
1609 				      dn, d_inode(dn));
1610 				ceph_dir_clear_ordered(dir);
1611 				d_delete(dn);
1612 				dput(dn);
1613 				goto retry_lookup;
1614 			}
1615 			ceph_fname_free_buffer(dir, &oname);
1616 
1617 			req->r_dentry = dn;
1618 			dput(parent);
1619 		}
1620 	}
1621 
1622 	if (rinfo->head->is_target) {
1623 		/* Should be filled in by handle_reply */
1624 		BUG_ON(!req->r_target_inode);
1625 
1626 		in = req->r_target_inode;
1627 		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
1628 				NULL, session,
1629 				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1630 				 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
1631 				 rinfo->head->result == 0) ?  req->r_fmode : -1,
1632 				&req->r_caps_reservation);
1633 		if (err < 0) {
1634 			pr_err_client(cl, "badness %p %llx.%llx\n", in,
1635 				      ceph_vinop(in));
1636 			req->r_target_inode = NULL;
1637 			if (in->i_state & I_NEW)
1638 				discard_new_inode(in);
1639 			else
1640 				iput(in);
1641 			goto done;
1642 		}
1643 		if (in->i_state & I_NEW)
1644 			unlock_new_inode(in);
1645 	}
1646 
1647 	/*
1648 	 * ignore null lease/binding on snapdir ENOENT, or else we
1649 	 * will have trouble splicing in the virtual snapdir later
1650 	 */
1651 	if (rinfo->head->is_dentry &&
1652 	    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1653 	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1654 	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1655 					       fsc->mount_options->snapdir_name,
1656 					       req->r_dentry->d_name.len))) {
1657 		/*
1658 		 * lookup link rename   : null -> possibly existing inode
1659 		 * mknod symlink mkdir  : null -> new inode
1660 		 * unlink               : linked -> null
1661 		 */
1662 		struct inode *dir = req->r_parent;
1663 		struct dentry *dn = req->r_dentry;
1664 		bool have_dir_cap, have_lease;
1665 
1666 		BUG_ON(!dn);
1667 		BUG_ON(!dir);
1668 		BUG_ON(d_inode(dn->d_parent) != dir);
1669 
1670 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1671 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1672 
1673 		BUG_ON(ceph_ino(dir) != dvino.ino);
1674 		BUG_ON(ceph_snap(dir) != dvino.snap);
1675 
1676 		/* do we have a lease on the whole dir? */
1677 		have_dir_cap =
1678 			(le32_to_cpu(rinfo->diri.in->cap.caps) &
1679 			 CEPH_CAP_FILE_SHARED);
1680 
1681 		/* do we have a dn lease? */
1682 		have_lease = have_dir_cap ||
1683 			le32_to_cpu(rinfo->dlease->duration_ms);
1684 		if (!have_lease)
1685 			doutc(cl, "no dentry lease or dir cap\n");
1686 
1687 		/* rename? */
1688 		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1689 			struct inode *olddir = req->r_old_dentry_dir;
1690 			BUG_ON(!olddir);
1691 
1692 			doutc(cl, " src %p '%pd' dst %p '%pd'\n",
1693 			      req->r_old_dentry, req->r_old_dentry, dn, dn);
1694 			doutc(cl, "doing d_move %p -> %p\n", req->r_old_dentry, dn);
1695 
1696 			/* d_move screws up sibling dentries' offsets */
1697 			ceph_dir_clear_ordered(dir);
1698 			ceph_dir_clear_ordered(olddir);
1699 
1700 			d_move(req->r_old_dentry, dn);
1701 			doutc(cl, " src %p '%pd' dst %p '%pd'\n",
1702 			      req->r_old_dentry, req->r_old_dentry, dn, dn);
1703 
1704 			/* ensure target dentry is invalidated, despite
1705 			   rehashing bug in vfs_rename_dir */
1706 			ceph_invalidate_dentry_lease(dn);
1707 
1708 			doutc(cl, "dn %p gets new offset %lld\n",
1709 			      req->r_old_dentry,
1710 			      ceph_dentry(req->r_old_dentry)->offset);
1711 
1712 			/* swap r_dentry and r_old_dentry in case that
1713 			 * splice_dentry() gets called later. This is safe
1714 			 * because no other place will use them */
1715 			req->r_dentry = req->r_old_dentry;
1716 			req->r_old_dentry = dn;
1717 			dn = req->r_dentry;
1718 		}
1719 
1720 		/* null dentry? */
1721 		if (!rinfo->head->is_target) {
1722 			doutc(cl, "null dentry\n");
1723 			if (d_really_is_positive(dn)) {
1724 				doutc(cl, "d_delete %p\n", dn);
1725 				ceph_dir_clear_ordered(dir);
1726 				d_delete(dn);
1727 			} else if (have_lease) {
1728 				if (d_unhashed(dn))
1729 					d_add(dn, NULL);
1730 			}
1731 
1732 			if (!d_unhashed(dn) && have_lease)
1733 				update_dentry_lease(dir, dn,
1734 						    rinfo->dlease, session,
1735 						    req->r_request_started);
1736 			goto done;
1737 		}
1738 
1739 		/* attach proper inode */
1740 		if (d_really_is_negative(dn)) {
1741 			ceph_dir_clear_ordered(dir);
1742 			ihold(in);
1743 			err = splice_dentry(&req->r_dentry, in);
1744 			if (err < 0)
1745 				goto done;
1746 			dn = req->r_dentry;  /* may have spliced */
1747 		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1748 			doutc(cl, " %p links to %p %llx.%llx, not %llx.%llx\n",
1749 			      dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1750 			      ceph_vinop(in));
1751 			d_invalidate(dn);
1752 			have_lease = false;
1753 		}
1754 
1755 		if (have_lease) {
1756 			update_dentry_lease(dir, dn,
1757 					    rinfo->dlease, session,
1758 					    req->r_request_started);
1759 		}
1760 		doutc(cl, " final dn %p\n", dn);
1761 	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1762 		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
1763 	           test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1764 		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1765 		struct inode *dir = req->r_parent;
1766 
1767 		/* fill out a snapdir LOOKUPSNAP dentry */
1768 		BUG_ON(!dir);
1769 		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1770 		BUG_ON(!req->r_dentry);
1771 		doutc(cl, " linking snapped dir %p to dn %p\n", in,
1772 		      req->r_dentry);
1773 		ceph_dir_clear_ordered(dir);
1774 		ihold(in);
1775 		err = splice_dentry(&req->r_dentry, in);
1776 		if (err < 0)
1777 			goto done;
1778 	} else if (rinfo->head->is_dentry && req->r_dentry) {
1779 		/* parent inode is not locked, be careful */
1780 		struct ceph_vino *ptvino = NULL;
1781 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1782 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1783 		if (rinfo->head->is_target) {
1784 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1785 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1786 			ptvino = &tvino;
1787 		}
1788 		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
1789 					    session, req->r_request_started,
1790 					    rinfo->dname, rinfo->dname_len,
1791 					    &dvino, ptvino);
1792 	}
1793 done:
1794 	doutc(cl, "done err=%d\n", err);
1795 	return err;
1796 }
1797 
1798 /*
1799  * Prepopulate our cache with readdir results, leases, etc.
1800  */
1801 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1802 					   struct ceph_mds_session *session)
1803 {
1804 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1805 	struct ceph_client *cl = session->s_mdsc->fsc->client;
1806 	int i, err = 0;
1807 
1808 	for (i = 0; i < rinfo->dir_nr; i++) {
1809 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1810 		struct ceph_vino vino;
1811 		struct inode *in;
1812 		int rc;
1813 
1814 		vino.ino = le64_to_cpu(rde->inode.in->ino);
1815 		vino.snap = le64_to_cpu(rde->inode.in->snapid);
1816 
1817 		in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL);
1818 		if (IS_ERR(in)) {
1819 			err = PTR_ERR(in);
1820 			doutc(cl, "badness got %d\n", err);
1821 			continue;
1822 		}
1823 		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1824 				     -1, &req->r_caps_reservation);
1825 		if (rc < 0) {
1826 			pr_err_client(cl, "inode badness on %p got %d\n", in,
1827 				      rc);
1828 			err = rc;
1829 			if (in->i_state & I_NEW) {
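				/*
				 * discard_new_inode() drops a reference
				 * itself, so take an extra one to keep the
				 * final iput() below balanced.
				 */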
1830 				ihold(in);
1831 				discard_new_inode(in);
1832 			}
1833 		} else if (in->i_state & I_NEW) {
1834 			unlock_new_inode(in);
1835 		}
1836 
1837 		iput(in);
1838 	}
1839 
1840 	return err;
1841 }
1842 
1843 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1844 {
1845 	if (ctl->page) {
1846 		kunmap(ctl->page);
1847 		put_page(ctl->page);
1848 		ctl->page = NULL;
1849 	}
1850 }
1851 
1852 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1853 			      struct ceph_readdir_cache_control *ctl,
1854 			      struct ceph_mds_request *req)
1855 {
1856 	struct ceph_client *cl = ceph_inode_to_client(dir);
1857 	struct ceph_inode_info *ci = ceph_inode(dir);
1858 	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1859 	unsigned idx = ctl->index % nsize;
1860 	pgoff_t pgoff = ctl->index / nsize;
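	/*
	 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
	 * 8-byte dentry pointers): nsize = 4096 / 8 = 512 entries per
	 * page, so cache index 1000 lands in page 1 (1000 / 512) at
	 * slot 488 (1000 % 512).
	 */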
1861 
1862 	if (!ctl->page || pgoff != page_index(ctl->page)) {
1863 		ceph_readdir_cache_release(ctl);
1864 		if (idx == 0)
1865 			ctl->page = grab_cache_page(&dir->i_data, pgoff);
1866 		else
1867 			ctl->page = find_lock_page(&dir->i_data, pgoff);
1868 		if (!ctl->page) {
1869 			ctl->index = -1;
1870 			return idx == 0 ? -ENOMEM : 0;
1871 		}
1872 		/* reading/filling the cache is serialized by
1873 		 * i_rwsem, so there is no need to hold the page lock */
1874 		unlock_page(ctl->page);
1875 		ctl->dentries = kmap(ctl->page);
1876 		if (idx == 0)
1877 			memset(ctl->dentries, 0, PAGE_SIZE);
1878 	}
1879 
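	/*
	 * If any dentry was dropped or the directory order changed since
	 * this readdir run started (i.e. the counters sampled into the
	 * request no longer match the inode's), the cached run would have
	 * holes, so stop maintaining the cache for this pass.
	 */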
1880 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1881 	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1882 		doutc(cl, "dn %p idx %d\n", dn, ctl->index);
1883 		ctl->dentries[idx] = dn;
1884 		ctl->index++;
1885 	} else {
1886 		doutc(cl, "disable readdir cache\n");
1887 		ctl->index = -1;
1888 	}
1889 	return 0;
1890 }
1891 
1892 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1893 			     struct ceph_mds_session *session)
1894 {
1895 	struct dentry *parent = req->r_dentry;
1896 	struct inode *inode = d_inode(parent);
1897 	struct ceph_inode_info *ci = ceph_inode(inode);
1898 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1899 	struct ceph_client *cl = session->s_mdsc->fsc->client;
1900 	struct qstr dname;
1901 	struct dentry *dn;
1902 	struct inode *in;
1903 	int err = 0, skipped = 0, ret, i;
1904 	u32 frag = le32_to_cpu(req->r_args.readdir.frag);
1905 	u32 last_hash = 0;
1906 	u32 fpos_offset;
1907 	struct ceph_readdir_cache_control cache_ctl = {};
1908 
1909 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1910 		return readdir_prepopulate_inodes_only(req, session);
1911 
1912 	if (rinfo->hash_order) {
1913 		if (req->r_path2) {
1914 			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1915 						  req->r_path2,
1916 						  strlen(req->r_path2));
1917 			last_hash = ceph_frag_value(last_hash);
1918 		} else if (rinfo->offset_hash) {
1919 			/* mds understands offset_hash */
1920 			WARN_ON_ONCE(req->r_readdir_offset != 2);
1921 			last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
1922 		}
1923 	}
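	/*
	 * Note: readdir offsets 0 and 1 are reserved for the "." and ".."
	 * entries, so the first real entry of each frag (or hash bucket)
	 * sits at offset 2; that is why r_readdir_offset and fpos_offset
	 * keep getting reset to 2 below.
	 */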
1924 
1925 	if (rinfo->dir_dir &&
1926 	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1927 		doutc(cl, "got new frag %x -> %x\n", frag,
1928 			    le32_to_cpu(rinfo->dir_dir->frag));
1929 		frag = le32_to_cpu(rinfo->dir_dir->frag);
1930 		if (!rinfo->hash_order)
1931 			req->r_readdir_offset = 2;
1932 	}
1933 
1934 	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1935 		doutc(cl, "%d items under SNAPDIR dn %p\n",
1936 		      rinfo->dir_nr, parent);
1937 	} else {
1938 		doutc(cl, "%d items under dn %p\n", rinfo->dir_nr, parent);
1939 		if (rinfo->dir_dir)
1940 			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1941 
1942 		if (ceph_frag_is_leftmost(frag) &&
1943 		    req->r_readdir_offset == 2 &&
1944 		    !(rinfo->hash_order && last_hash)) {
1945 			/* note dir version at start of readdir so we can
1946 			 * tell if any dentries get dropped */
1947 			req->r_dir_release_cnt =
1948 				atomic64_read(&ci->i_release_count);
1949 			req->r_dir_ordered_cnt =
1950 				atomic64_read(&ci->i_ordered_count);
1951 			req->r_readdir_cache_idx = 0;
1952 		}
1953 	}
1954 
1955 	cache_ctl.index = req->r_readdir_cache_idx;
1956 	fpos_offset = req->r_readdir_offset;
1957 
1958 	/* FIXME: release caps/leases if error occurs */
1959 	for (i = 0; i < rinfo->dir_nr; i++) {
1960 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1961 		struct ceph_vino tvino;
1962 
1963 		dname.name = rde->name;
1964 		dname.len = rde->name_len;
1965 		dname.hash = full_name_hash(parent, dname.name, dname.len);
1966 
1967 		tvino.ino = le64_to_cpu(rde->inode.in->ino);
1968 		tvino.snap = le64_to_cpu(rde->inode.in->snapid);
1969 
1970 		if (rinfo->hash_order) {
1971 			u32 hash = ceph_frag_value(rde->raw_hash);
1972 			if (hash != last_hash)
1973 				fpos_offset = 2;
1974 			last_hash = hash;
1975 			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1976 		} else {
1977 			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1978 		}
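		/*
		 * rde->offset is an opaque readdir position: roughly the
		 * frag (or name hash, in hash order) in the upper bits of
		 * the loff_t and the per-bucket ordinal in the lower bits;
		 * see ceph_make_fpos() in super.h for the exact packing.
		 */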
1979 
1980 retry_lookup:
1981 		dn = d_lookup(parent, &dname);
1982 		doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
1983 		      parent, dname.len, dname.name, dn);
1984 
1985 		if (!dn) {
1986 			dn = d_alloc(parent, &dname);
1987 			doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
1988 			      dname.len, dname.name, dn);
1989 			if (!dn) {
1990 				doutc(cl, "d_alloc badness\n");
1991 				err = -ENOMEM;
1992 				goto out;
1993 			}
1994 			if (rde->is_nokey) {
1995 				spin_lock(&dn->d_lock);
1996 				dn->d_flags |= DCACHE_NOKEY_NAME;
1997 				spin_unlock(&dn->d_lock);
1998 			}
1999 		} else if (d_really_is_positive(dn) &&
2000 			   (ceph_ino(d_inode(dn)) != tvino.ino ||
2001 			    ceph_snap(d_inode(dn)) != tvino.snap)) {
2002 			struct ceph_dentry_info *di = ceph_dentry(dn);
2003 			doutc(cl, " dn %p points to wrong inode %p\n",
2004 			      dn, d_inode(dn));
2005 
2006 			spin_lock(&dn->d_lock);
2007 			if (di->offset > 0 &&
2008 			    di->lease_shared_gen ==
2009 			    atomic_read(&ci->i_shared_gen)) {
2010 				__ceph_dir_clear_ordered(ci);
2011 				di->offset = 0;
2012 			}
2013 			spin_unlock(&dn->d_lock);
2014 
2015 			d_delete(dn);
2016 			dput(dn);
2017 			goto retry_lookup;
2018 		}
2019 
2020 		/* inode */
2021 		if (d_really_is_positive(dn)) {
2022 			in = d_inode(dn);
2023 		} else {
2024 			in = ceph_get_inode(parent->d_sb, tvino, NULL);
2025 			if (IS_ERR(in)) {
2026 				doutc(cl, "new_inode badness\n");
2027 				d_drop(dn);
2028 				dput(dn);
2029 				err = PTR_ERR(in);
2030 				goto out;
2031 			}
2032 		}
2033 
2034 		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
2035 				      -1, &req->r_caps_reservation);
2036 		if (ret < 0) {
2037 			pr_err_client(cl, "badness on %p %llx.%llx\n", in,
2038 				      ceph_vinop(in));
2039 			if (d_really_is_negative(dn)) {
2040 				if (in->i_state & I_NEW) {
2041 					ihold(in);
2042 					discard_new_inode(in);
2043 				}
2044 				iput(in);
2045 			}
2046 			d_drop(dn);
2047 			err = ret;
2048 			goto next_item;
2049 		}
2050 		if (in->i_state & I_NEW)
2051 			unlock_new_inode(in);
2052 
2053 		if (d_really_is_negative(dn)) {
2054 			if (ceph_security_xattr_deadlock(in)) {
2055 				doutc(cl, " skip splicing dn %p to inode %p"
2056 				      " (security xattr deadlock)\n", dn, in);
2057 				iput(in);
2058 				skipped++;
2059 				goto next_item;
2060 			}
2061 
2062 			err = splice_dentry(&dn, in);
2063 			if (err < 0)
2064 				goto next_item;
2065 		}
2066 
2067 		ceph_dentry(dn)->offset = rde->offset;
2068 
2069 		update_dentry_lease(d_inode(parent), dn,
2070 				    rde->lease, req->r_session,
2071 				    req->r_request_started);
2072 
2073 		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
2074 			ret = fill_readdir_cache(d_inode(parent), dn,
2075 						 &cache_ctl, req);
2076 			if (ret < 0)
2077 				err = ret;
2078 		}
2079 next_item:
2080 		dput(dn);
2081 	}
2082 out:
2083 	if (err == 0 && skipped == 0) {
2084 		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
2085 		req->r_readdir_cache_idx = cache_ctl.index;
2086 	}
2087 	ceph_readdir_cache_release(&cache_ctl);
2088 	doutc(cl, "done\n");
2089 	return err;
2090 }
2091 
2092 bool ceph_inode_set_size(struct inode *inode, loff_t size)
2093 {
2094 	struct ceph_client *cl = ceph_inode_to_client(inode);
2095 	struct ceph_inode_info *ci = ceph_inode(inode);
2096 	bool ret;
2097 
2098 	spin_lock(&ci->i_ceph_lock);
2099 	doutc(cl, "set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
2100 	i_size_write(inode, size);
2101 	ceph_fscache_update(inode);
2102 	inode->i_blocks = calc_inode_blocks(size);
2103 
2104 	ret = __ceph_should_report_size(ci);
2105 
2106 	spin_unlock(&ci->i_ceph_lock);
2107 
2108 	return ret;
2109 }
2110 
2111 void ceph_queue_inode_work(struct inode *inode, int work_bit)
2112 {
2113 	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2114 	struct ceph_client *cl = fsc->client;
2115 	struct ceph_inode_info *ci = ceph_inode(inode);
2116 	set_bit(work_bit, &ci->i_work_mask);
2117 
2118 	ihold(inode);
2119 	if (queue_work(fsc->inode_wq, &ci->i_work)) {
2120 		doutc(cl, "%p %llx.%llx mask=%lx\n", inode,
2121 		      ceph_vinop(inode), ci->i_work_mask);
2122 	} else {
2123 		doutc(cl, "%p %llx.%llx already queued, mask=%lx\n",
2124 		      inode, ceph_vinop(inode), ci->i_work_mask);
2125 		iput(inode);
2126 	}
2127 }
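/*
 * Each bit queued above is consumed by ceph_inode_work(); the ihold()
 * taken here is dropped at the end of that worker (or right away if
 * the work was already queued), keeping the inode pinned while work
 * is pending.
 */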
2128 
2129 static void ceph_do_invalidate_pages(struct inode *inode)
2130 {
2131 	struct ceph_client *cl = ceph_inode_to_client(inode);
2132 	struct ceph_inode_info *ci = ceph_inode(inode);
2133 	u32 orig_gen;
2134 	int check = 0;
2135 
2136 	ceph_fscache_invalidate(inode, false);
2137 
2138 	mutex_lock(&ci->i_truncate_mutex);
2139 
2140 	if (ceph_inode_is_shutdown(inode)) {
2141 		pr_warn_ratelimited_client(cl,
2142 			"%p %llx.%llx is shut down\n", inode,
2143 			ceph_vinop(inode));
2144 		mapping_set_error(inode->i_mapping, -EIO);
2145 		truncate_pagecache(inode, 0);
2146 		mutex_unlock(&ci->i_truncate_mutex);
2147 		goto out;
2148 	}
2149 
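	/*
	 * i_rdcache_gen is bumped each time the CACHE cap is granted and
	 * i_rdcache_revoking records the generation a revoke was queued
	 * for; if the two no longer match here, the cap was re-granted
	 * (or a newer revoke superseded us) and this invalidation pass
	 * is stale.
	 */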
2150 	spin_lock(&ci->i_ceph_lock);
2151 	doutc(cl, "%p %llx.%llx gen %d revoking %d\n", inode,
2152 	      ceph_vinop(inode), ci->i_rdcache_gen, ci->i_rdcache_revoking);
2153 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2154 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
2155 			check = 1;
2156 		spin_unlock(&ci->i_ceph_lock);
2157 		mutex_unlock(&ci->i_truncate_mutex);
2158 		goto out;
2159 	}
2160 	orig_gen = ci->i_rdcache_gen;
2161 	spin_unlock(&ci->i_ceph_lock);
2162 
2163 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
2164 		pr_err_client(cl, "invalidate_inode_pages2 %llx.%llx failed\n",
2165 			      ceph_vinop(inode));
2166 	}
2167 
2168 	spin_lock(&ci->i_ceph_lock);
2169 	if (orig_gen == ci->i_rdcache_gen &&
2170 	    orig_gen == ci->i_rdcache_revoking) {
2171 		doutc(cl, "%p %llx.%llx gen %d successful\n", inode,
2172 		      ceph_vinop(inode), ci->i_rdcache_gen);
2173 		ci->i_rdcache_revoking--;
2174 		check = 1;
2175 	} else {
2176 		doutc(cl, "%p %llx.%llx gen %d raced, now %d revoking %d\n",
2177 		      inode, ceph_vinop(inode), orig_gen, ci->i_rdcache_gen,
2178 		      ci->i_rdcache_revoking);
2179 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
2180 			check = 1;
2181 	}
2182 	spin_unlock(&ci->i_ceph_lock);
2183 	mutex_unlock(&ci->i_truncate_mutex);
2184 out:
2185 	if (check)
2186 		ceph_check_caps(ci, 0);
2187 }
2188 
2189 /*
2190  * Make sure any pending truncation is applied before doing anything
2191  * that may depend on it.
2192  */
2193 void __ceph_do_pending_vmtruncate(struct inode *inode)
2194 {
2195 	struct ceph_client *cl = ceph_inode_to_client(inode);
2196 	struct ceph_inode_info *ci = ceph_inode(inode);
2197 	u64 to;
2198 	int wrbuffer_refs, finish = 0;
2199 
2200 	mutex_lock(&ci->i_truncate_mutex);
2201 retry:
2202 	spin_lock(&ci->i_ceph_lock);
2203 	if (ci->i_truncate_pending == 0) {
2204 		doutc(cl, "%p %llx.%llx none pending\n", inode,
2205 		      ceph_vinop(inode));
2206 		spin_unlock(&ci->i_ceph_lock);
2207 		mutex_unlock(&ci->i_truncate_mutex);
2208 		return;
2209 	}
2210 
2211 	/*
2212 	 * make sure any dirty snapped pages are flushed before we
2213 	 * possibly truncate them... so write AND block!
2214 	 */
2215 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
2216 		spin_unlock(&ci->i_ceph_lock);
2217 		doutc(cl, "%p %llx.%llx flushing snaps first\n", inode,
2218 		      ceph_vinop(inode));
2219 		filemap_write_and_wait_range(&inode->i_data, 0,
2220 					     inode->i_sb->s_maxbytes);
2221 		goto retry;
2222 	}
2223 
2224 	/* there should be no reader or writer */
2225 	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
2226 
2227 	to = ci->i_truncate_pagecache_size;
2228 	wrbuffer_refs = ci->i_wrbuffer_ref;
2229 	doutc(cl, "%p %llx.%llx (%d) to %lld\n", inode, ceph_vinop(inode),
2230 	      ci->i_truncate_pending, to);
2231 	spin_unlock(&ci->i_ceph_lock);
2232 
2233 	ceph_fscache_resize(inode, to);
2234 	truncate_pagecache(inode, to);
2235 
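	/*
	 * i_ceph_lock was dropped across the truncate above, so a racing
	 * truncate may have changed i_truncate_pagecache_size again; if
	 * so, loop and apply the newer truncation as well.
	 */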
2236 	spin_lock(&ci->i_ceph_lock);
2237 	if (to == ci->i_truncate_pagecache_size) {
2238 		ci->i_truncate_pending = 0;
2239 		finish = 1;
2240 	}
2241 	spin_unlock(&ci->i_ceph_lock);
2242 	if (!finish)
2243 		goto retry;
2244 
2245 	mutex_unlock(&ci->i_truncate_mutex);
2246 
2247 	if (wrbuffer_refs == 0)
2248 		ceph_check_caps(ci, 0);
2249 
2250 	wake_up_all(&ci->i_cap_wq);
2251 }
2252 
2253 static void ceph_inode_work(struct work_struct *work)
2254 {
2255 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
2256 						 i_work);
2257 	struct inode *inode = &ci->netfs.inode;
2258 	struct ceph_client *cl = ceph_inode_to_client(inode);
2259 
2260 	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
2261 		doutc(cl, "writeback %p %llx.%llx\n", inode, ceph_vinop(inode));
2262 		filemap_fdatawrite(&inode->i_data);
2263 	}
2264 	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
2265 		ceph_do_invalidate_pages(inode);
2266 
2267 	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
2268 		__ceph_do_pending_vmtruncate(inode);
2269 
2270 	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
2271 		ceph_check_caps(ci, 0);
2272 
2273 	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
2274 		ceph_flush_snaps(ci, NULL);
2275 
2276 	iput(inode);
2277 }
2278 
2279 static const char *ceph_encrypted_get_link(struct dentry *dentry,
2280 					   struct inode *inode,
2281 					   struct delayed_call *done)
2282 {
2283 	struct ceph_inode_info *ci = ceph_inode(inode);
2284 
2285 	if (!dentry)
2286 		return ERR_PTR(-ECHILD);
2287 
2288 	return fscrypt_get_symlink(inode, ci->i_symlink, i_size_read(inode),
2289 				   done);
2290 }
2291 
2292 static int ceph_encrypted_symlink_getattr(struct mnt_idmap *idmap,
2293 					  const struct path *path,
2294 					  struct kstat *stat, u32 request_mask,
2295 					  unsigned int query_flags)
2296 {
2297 	int ret;
2298 
2299 	ret = ceph_getattr(idmap, path, stat, request_mask, query_flags);
2300 	if (ret)
2301 		return ret;
2302 	return fscrypt_symlink_getattr(path, stat);
2303 }
2304 
2305 /*
2306  * symlinks
2307  */
2308 static const struct inode_operations ceph_symlink_iops = {
2309 	.get_link = simple_get_link,
2310 	.setattr = ceph_setattr,
2311 	.getattr = ceph_getattr,
2312 	.listxattr = ceph_listxattr,
2313 };
2314 
2315 static const struct inode_operations ceph_encrypted_symlink_iops = {
2316 	.get_link = ceph_encrypted_get_link,
2317 	.setattr = ceph_setattr,
2318 	.getattr = ceph_encrypted_symlink_getattr,
2319 	.listxattr = ceph_listxattr,
2320 };
2321 
2322 /*
2323  * Transfer the encrypted last block to the MDS, which will update
2324  * it on our behalf when truncating to a smaller size.
2325  *
2326  * We don't support a PAGE_SIZE that is smaller than the
2327  * CEPH_FSCRYPT_BLOCK_SIZE.
2328  */
2329 static int fill_fscrypt_truncate(struct inode *inode,
2330 				 struct ceph_mds_request *req,
2331 				 struct iattr *attr)
2332 {
2333 	struct ceph_client *cl = ceph_inode_to_client(inode);
2334 	struct ceph_inode_info *ci = ceph_inode(inode);
2335 	int boff = attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE;
2336 	loff_t pos, orig_pos = round_down(attr->ia_size,
2337 					  CEPH_FSCRYPT_BLOCK_SIZE);
2338 	u64 block = orig_pos >> CEPH_FSCRYPT_BLOCK_SHIFT;
2339 	struct ceph_pagelist *pagelist = NULL;
2340 	struct kvec iov = {0};
2341 	struct iov_iter iter;
2342 	struct page *page = NULL;
2343 	struct ceph_fscrypt_truncate_size_header header;
2344 	int retry_op = 0;
2345 	int len = CEPH_FSCRYPT_BLOCK_SIZE;
2346 	loff_t i_size = i_size_read(inode);
2347 	int got, ret, issued;
2348 	u64 objver;
2349 
2350 	ret = __ceph_get_caps(inode, NULL, CEPH_CAP_FILE_RD, 0, -1, &got);
2351 	if (ret < 0)
2352 		return ret;
2353 
2354 	issued = __ceph_caps_issued(ci, NULL);
2355 
2356 	doutc(cl, "size %lld -> %lld got cap refs on %s, issued %s\n",
2357 	      i_size, attr->ia_size, ceph_cap_string(got),
2358 	      ceph_cap_string(issued));
2359 
2360 	/* Try to writeback the dirty pagecaches */
2361 	if (issued & (CEPH_CAP_FILE_BUFFER)) {
2362 		loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SIZE - 1;
2363 
2364 		ret = filemap_write_and_wait_range(inode->i_mapping,
2365 						   orig_pos, lend);
2366 		if (ret < 0)
2367 			goto out;
2368 	}
2369 
2370 	page = __page_cache_alloc(GFP_KERNEL);
2371 	if (!page) {
2372 		ret = -ENOMEM;
2373 		goto out;
2374 	}
2375 
2376 	pagelist = ceph_pagelist_alloc(GFP_KERNEL);
2377 	if (!pagelist) {
2378 		ret = -ENOMEM;
2379 		goto out;
2380 	}
2381 
2382 	iov.iov_base = kmap_local_page(page);
2383 	iov.iov_len = len;
2384 	iov_iter_kvec(&iter, READ, &iov, 1, len);
2385 
2386 	pos = orig_pos;
2387 	ret = __ceph_sync_read(inode, &pos, &iter, &retry_op, &objver);
2388 	if (ret < 0)
2389 		goto out;
2390 
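	/*
	 * The page now holds the (decrypted) contents of the last
	 * CEPH_FSCRYPT_BLOCK_SIZE block; for the non-hole case the tail
	 * beyond the new size is zeroed and the block re-encrypted below
	 * before being handed to the MDS.
	 */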
2391 	/* Insert the header first */
2392 	header.ver = 1;
2393 	header.compat = 1;
2394 	header.change_attr = cpu_to_le64(inode_peek_iversion_raw(inode));
2395 
2396 	/*
2397 	 * Always set the block_size to CEPH_FSCRYPT_BLOCK_SIZE,
2398 	 * because the MDS may need it to do the truncate.
2399 	 */
2400 	header.block_size = cpu_to_le32(CEPH_FSCRYPT_BLOCK_SIZE);
2401 
2402 	/*
2403 	 * If we hit a hole here, just skip filling the fscrypt
2404 	 * payload for the request: once fscrypt is enabled the
2405 	 * file is split into blocks of CEPH_FSCRYPT_BLOCK_SIZE,
2406 	 * so the size of any hole must be a multiple of the
2407 	 * block size.
2408 	 *
2409 	 * If the RADOS object doesn't exist, objver will have
2410 	 * been set to 0 by the read above.
2411 	 */
2412 	if (!objver) {
2413 		doutc(cl, "hit hole, ppos %lld < size %lld\n", pos, i_size);
2414 
2415 		header.data_len = cpu_to_le32(8 + 8 + 4);
2416 		header.file_offset = 0;
2417 		ret = 0;
2418 	} else {
2419 		header.data_len = cpu_to_le32(8 + 8 + 4 + CEPH_FSCRYPT_BLOCK_SIZE);
2420 		header.file_offset = cpu_to_le64(orig_pos);
2421 
2422 		doutc(cl, "encrypt block boff/bsize %d/%lu\n", boff,
2423 		      CEPH_FSCRYPT_BLOCK_SIZE);
2424 
2425 		/* truncate and zero out the extra contents for the last block */
2426 		memset(iov.iov_base + boff, 0, PAGE_SIZE - boff);
2427 
2428 		/* encrypt the last block */
2429 		ret = ceph_fscrypt_encrypt_block_inplace(inode, page,
2430 						    CEPH_FSCRYPT_BLOCK_SIZE,
2431 						    0, block,
2432 						    GFP_KERNEL);
2433 		if (ret)
2434 			goto out;
2435 	}
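	/*
	 * Sketch of the pagelist sent to the MDS (see struct
	 * ceph_fscrypt_truncate_size_header in crypto.h for the
	 * authoritative layout):
	 *
	 *   header { ver, compat, data_len, change_attr, file_offset,
	 *            block_size }
	 *   [ CEPH_FSCRYPT_BLOCK_SIZE bytes of re-encrypted last block ]
	 *
	 * where data_len covers change_attr + file_offset + block_size
	 * (8 + 8 + 4 bytes) plus the block payload when one is included.
	 */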
2436 
2437 	/* Insert the header */
2438 	ret = ceph_pagelist_append(pagelist, &header, sizeof(header));
2439 	if (ret)
2440 		goto out;
2441 
2442 	if (header.block_size) {
2443 		/* Append the last block contents to pagelist */
2444 		ret = ceph_pagelist_append(pagelist, iov.iov_base,
2445 					   CEPH_FSCRYPT_BLOCK_SIZE);
2446 		if (ret)
2447 			goto out;
2448 	}
2449 	req->r_pagelist = pagelist;
2450 out:
2451 	doutc(cl, "%p %llx.%llx size dropping cap refs on %s\n", inode,
2452 	      ceph_vinop(inode), ceph_cap_string(got));
2453 	ceph_put_cap_refs(ci, got);
2454 	if (iov.iov_base)
2455 		kunmap_local(iov.iov_base);
2456 	if (page)
2457 		__free_pages(page, 0);
2458 	if (ret && pagelist)
2459 		ceph_pagelist_release(pagelist);
2460 	return ret;
2461 }
2462 
2463 int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
2464 		   struct iattr *attr, struct ceph_iattr *cia)
2465 {
2466 	struct ceph_inode_info *ci = ceph_inode(inode);
2467 	unsigned int ia_valid = attr->ia_valid;
2468 	struct ceph_mds_request *req;
2469 	struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
2470 	struct ceph_client *cl = ceph_inode_to_client(inode);
2471 	struct ceph_cap_flush *prealloc_cf;
2472 	loff_t isize = i_size_read(inode);
2473 	int issued;
2474 	int release = 0, dirtied = 0;
2475 	int mask = 0;
2476 	int err = 0;
2477 	int inode_dirty_flags = 0;
2478 	bool lock_snap_rwsem = false;
2479 	bool fill_fscrypt;
2480 	int truncate_retry = 20; /* The RMW will take around 50ms */
2481 
2482 retry:
2483 	prealloc_cf = ceph_alloc_cap_flush();
2484 	if (!prealloc_cf)
2485 		return -ENOMEM;
2486 
2487 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2488 				       USE_AUTH_MDS);
2489 	if (IS_ERR(req)) {
2490 		ceph_free_cap_flush(prealloc_cf);
2491 		return PTR_ERR(req);
2492 	}
2493 
2494 	fill_fscrypt = false;
2495 	spin_lock(&ci->i_ceph_lock);
2496 	issued = __ceph_caps_issued(ci, NULL);
2497 
2498 	if (!ci->i_head_snapc &&
2499 	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2500 		lock_snap_rwsem = true;
2501 		if (!down_read_trylock(&mdsc->snap_rwsem)) {
2502 			spin_unlock(&ci->i_ceph_lock);
2503 			down_read(&mdsc->snap_rwsem);
2504 			spin_lock(&ci->i_ceph_lock);
2505 			issued = __ceph_caps_issued(ci, NULL);
2506 		}
2507 	}
2508 
2509 	doutc(cl, "%p %llx.%llx issued %s\n", inode, ceph_vinop(inode),
2510 	      ceph_cap_string(issued));
2511 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
2512 	if (cia && cia->fscrypt_auth) {
2513 		u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth);
2514 
2515 		if (len > sizeof(*cia->fscrypt_auth)) {
2516 			err = -EINVAL;
2517 			spin_unlock(&ci->i_ceph_lock);
2518 			goto out;
2519 		}
2520 
2521 		doutc(cl, "%p %llx.%llx fscrypt_auth len %u to %u\n", inode,
2522 		      ceph_vinop(inode), ci->fscrypt_auth_len, len);
2523 
2524 		/* It should never be re-set once set */
2525 		WARN_ON_ONCE(ci->fscrypt_auth);
2526 
2527 		if (issued & CEPH_CAP_AUTH_EXCL) {
2528 			dirtied |= CEPH_CAP_AUTH_EXCL;
2529 			kfree(ci->fscrypt_auth);
2530 			ci->fscrypt_auth = (u8 *)cia->fscrypt_auth;
2531 			ci->fscrypt_auth_len = len;
2532 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2533 			   ci->fscrypt_auth_len != len ||
2534 			   memcmp(ci->fscrypt_auth, cia->fscrypt_auth, len)) {
2535 			req->r_fscrypt_auth = cia->fscrypt_auth;
2536 			mask |= CEPH_SETATTR_FSCRYPT_AUTH;
2537 			release |= CEPH_CAP_AUTH_SHARED;
2538 		}
2539 		cia->fscrypt_auth = NULL;
2540 	}
2541 #else
2542 	if (cia && cia->fscrypt_auth) {
2543 		err = -EINVAL;
2544 		spin_unlock(&ci->i_ceph_lock);
2545 		goto out;
2546 	}
2547 #endif /* CONFIG_FS_ENCRYPTION */
2548 
2549 	if (ia_valid & ATTR_UID) {
2550 		kuid_t fsuid = from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid);
2551 
2552 		doutc(cl, "%p %llx.%llx uid %d -> %d\n", inode,
2553 		      ceph_vinop(inode),
2554 		      from_kuid(&init_user_ns, inode->i_uid),
2555 		      from_kuid(&init_user_ns, attr->ia_uid));
2556 		if (issued & CEPH_CAP_AUTH_EXCL) {
2557 			inode->i_uid = fsuid;
2558 			dirtied |= CEPH_CAP_AUTH_EXCL;
2559 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2560 			   !uid_eq(fsuid, inode->i_uid)) {
2561 			req->r_args.setattr.uid = cpu_to_le32(
2562 				from_kuid(&init_user_ns, fsuid));
2563 			mask |= CEPH_SETATTR_UID;
2564 			release |= CEPH_CAP_AUTH_SHARED;
2565 		}
2566 	}
2567 	if (ia_valid & ATTR_GID) {
2568 		kgid_t fsgid = from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid);
2569 
2570 		doutc(cl, "%p %llx.%llx gid %d -> %d\n", inode,
2571 		      ceph_vinop(inode),
2572 		      from_kgid(&init_user_ns, inode->i_gid),
2573 		      from_kgid(&init_user_ns, attr->ia_gid));
2574 		if (issued & CEPH_CAP_AUTH_EXCL) {
2575 			inode->i_gid = fsgid;
2576 			dirtied |= CEPH_CAP_AUTH_EXCL;
2577 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2578 			   !gid_eq(fsgid, inode->i_gid)) {
2579 			req->r_args.setattr.gid = cpu_to_le32(
2580 				from_kgid(&init_user_ns, fsgid));
2581 			mask |= CEPH_SETATTR_GID;
2582 			release |= CEPH_CAP_AUTH_SHARED;
2583 		}
2584 	}
2585 	if (ia_valid & ATTR_MODE) {
2586 		doutc(cl, "%p %llx.%llx mode 0%o -> 0%o\n", inode,
2587 		      ceph_vinop(inode), inode->i_mode, attr->ia_mode);
2588 		if (issued & CEPH_CAP_AUTH_EXCL) {
2589 			inode->i_mode = attr->ia_mode;
2590 			dirtied |= CEPH_CAP_AUTH_EXCL;
2591 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2592 			   attr->ia_mode != inode->i_mode) {
2593 			inode->i_mode = attr->ia_mode;
2594 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2595 			mask |= CEPH_SETATTR_MODE;
2596 			release |= CEPH_CAP_AUTH_SHARED;
2597 		}
2598 	}
2599 
2600 	if (ia_valid & ATTR_ATIME) {
2601 		struct timespec64 atime = inode_get_atime(inode);
2602 
2603 		doutc(cl, "%p %llx.%llx atime %lld.%09ld -> %lld.%09ld\n",
2604 		      inode, ceph_vinop(inode),
2605 		      atime.tv_sec, atime.tv_nsec,
2606 		      attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2607 		if (issued & CEPH_CAP_FILE_EXCL) {
2608 			ci->i_time_warp_seq++;
2609 			inode_set_atime_to_ts(inode, attr->ia_atime);
2610 			dirtied |= CEPH_CAP_FILE_EXCL;
2611 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2612 			   timespec64_compare(&atime,
2613 					      &attr->ia_atime) < 0) {
2614 			inode_set_atime_to_ts(inode, attr->ia_atime);
2615 			dirtied |= CEPH_CAP_FILE_WR;
2616 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2617 			   !timespec64_equal(&atime, &attr->ia_atime)) {
2618 			ceph_encode_timespec64(&req->r_args.setattr.atime,
2619 					       &attr->ia_atime);
2620 			mask |= CEPH_SETATTR_ATIME;
2621 			release |= CEPH_CAP_FILE_SHARED |
2622 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2623 		}
2624 	}
2625 	if (ia_valid & ATTR_SIZE) {
2626 		doutc(cl, "%p %llx.%llx size %lld -> %lld\n", inode,
2627 		      ceph_vinop(inode), isize, attr->ia_size);
2628 		/*
2629 		 * Only when the new size is smaller and not aligned to
2630 		 * CEPH_FSCRYPT_BLOCK_SIZE will the RMW be needed.
2631 		 */
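		/*
		 * Illustrative example, assuming CEPH_FSCRYPT_BLOCK_SIZE
		 * is 4096: truncating an encrypted file to 6000 bytes
		 * leaves 6000 % 4096 == 1904 valid bytes in the last
		 * block, so that block is read back, zeroed beyond byte
		 * 1904 and re-encrypted by fill_fscrypt_truncate(), while
		 * the size sent to the MDS is rounded up to 8192.
		 */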
2632 		if (IS_ENCRYPTED(inode) && attr->ia_size < isize &&
2633 		    (attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE)) {
2634 			mask |= CEPH_SETATTR_SIZE;
2635 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2636 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2637 			set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
2638 			mask |= CEPH_SETATTR_FSCRYPT_FILE;
2639 			req->r_args.setattr.size =
2640 				cpu_to_le64(round_up(attr->ia_size,
2641 						     CEPH_FSCRYPT_BLOCK_SIZE));
2642 			req->r_args.setattr.old_size =
2643 				cpu_to_le64(round_up(isize,
2644 						     CEPH_FSCRYPT_BLOCK_SIZE));
2645 			req->r_fscrypt_file = attr->ia_size;
2646 			fill_fscrypt = true;
2647 		} else if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
2648 			if (attr->ia_size > isize) {
2649 				i_size_write(inode, attr->ia_size);
2650 				inode->i_blocks = calc_inode_blocks(attr->ia_size);
2651 				ci->i_reported_size = attr->ia_size;
2652 				dirtied |= CEPH_CAP_FILE_EXCL;
2653 				ia_valid |= ATTR_MTIME;
2654 			}
2655 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2656 			   attr->ia_size != isize) {
2657 			mask |= CEPH_SETATTR_SIZE;
2658 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2659 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2660 			if (IS_ENCRYPTED(inode) && attr->ia_size) {
2661 				set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
2662 				mask |= CEPH_SETATTR_FSCRYPT_FILE;
2663 				req->r_args.setattr.size =
2664 					cpu_to_le64(round_up(attr->ia_size,
2665 							     CEPH_FSCRYPT_BLOCK_SIZE));
2666 				req->r_args.setattr.old_size =
2667 					cpu_to_le64(round_up(isize,
2668 							     CEPH_FSCRYPT_BLOCK_SIZE));
2669 				req->r_fscrypt_file = attr->ia_size;
2670 			} else {
2671 				req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2672 				req->r_args.setattr.old_size = cpu_to_le64(isize);
2673 				req->r_fscrypt_file = 0;
2674 			}
2675 		}
2676 	}
2677 	if (ia_valid & ATTR_MTIME) {
2678 		struct timespec64 mtime = inode_get_mtime(inode);
2679 
2680 		doutc(cl, "%p %llx.%llx mtime %lld.%09ld -> %lld.%09ld\n",
2681 		      inode, ceph_vinop(inode),
2682 		      mtime.tv_sec, mtime.tv_nsec,
2683 		      attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2684 		if (issued & CEPH_CAP_FILE_EXCL) {
2685 			ci->i_time_warp_seq++;
2686 			inode_set_mtime_to_ts(inode, attr->ia_mtime);
2687 			dirtied |= CEPH_CAP_FILE_EXCL;
2688 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2689 			   timespec64_compare(&mtime, &attr->ia_mtime) < 0) {
2690 			inode_set_mtime_to_ts(inode, attr->ia_mtime);
2691 			dirtied |= CEPH_CAP_FILE_WR;
2692 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2693 			   !timespec64_equal(&mtime, &attr->ia_mtime)) {
2694 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
2695 					       &attr->ia_mtime);
2696 			mask |= CEPH_SETATTR_MTIME;
2697 			release |= CEPH_CAP_FILE_SHARED |
2698 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2699 		}
2700 	}
2701 
2702 	/* these do nothing */
2703 	if (ia_valid & ATTR_CTIME) {
2704 		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2705 					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2706 		doutc(cl, "%p %llx.%llx ctime %lld.%09ld -> %lld.%09ld (%s)\n",
2707 		      inode, ceph_vinop(inode),
2708 		      inode_get_ctime_sec(inode),
2709 		      inode_get_ctime_nsec(inode),
2710 		      attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2711 		      only ? "ctime only" : "ignored");
2712 		if (only) {
2713 			/*
2714 			 * if kernel wants to dirty ctime but nothing else,
2715 			 * we need to choose a cap to dirty under, or do
2716 			 * an almost-no-op setattr
2717 			 */
2718 			if (issued & CEPH_CAP_AUTH_EXCL)
2719 				dirtied |= CEPH_CAP_AUTH_EXCL;
2720 			else if (issued & CEPH_CAP_FILE_EXCL)
2721 				dirtied |= CEPH_CAP_FILE_EXCL;
2722 			else if (issued & CEPH_CAP_XATTR_EXCL)
2723 				dirtied |= CEPH_CAP_XATTR_EXCL;
2724 			else
2725 				mask |= CEPH_SETATTR_CTIME;
2726 		}
2727 	}
2728 	if (ia_valid & ATTR_FILE)
2729 		doutc(cl, "%p %llx.%llx ATTR_FILE ... hrm!\n", inode,
2730 		      ceph_vinop(inode));
2731 
2732 	if (dirtied) {
2733 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2734 							   &prealloc_cf);
2735 		inode_set_ctime_to_ts(inode, attr->ia_ctime);
2736 		inode_inc_iversion_raw(inode);
2737 	}
2738 
2739 	release &= issued;
2740 	spin_unlock(&ci->i_ceph_lock);
2741 	if (lock_snap_rwsem) {
2742 		up_read(&mdsc->snap_rwsem);
2743 		lock_snap_rwsem = false;
2744 	}
2745 
2746 	if (inode_dirty_flags)
2747 		__mark_inode_dirty(inode, inode_dirty_flags);
2748 
2749 	if (mask) {
2750 		req->r_inode = inode;
2751 		ihold(inode);
2752 		req->r_inode_drop = release;
2753 		req->r_args.setattr.mask = cpu_to_le32(mask);
2754 		req->r_num_caps = 1;
2755 		req->r_stamp = attr->ia_ctime;
2756 		if (fill_fscrypt) {
2757 			err = fill_fscrypt_truncate(inode, req, attr);
2758 			if (err)
2759 				goto out;
2760 		}
2761 
2762 		/*
2763 		 * The truncate request will return -EAGAIN when the
2764 		 * last block has been updated just before the MDS
2765 		 * successfully gets the xlock for the FILE lock. To
2766 		 * avoid corrupting the file contents we need to retry
2767 		 * it.
2768 		 */
2769 		err = ceph_mdsc_do_request(mdsc, NULL, req);
2770 		if (err == -EAGAIN && truncate_retry--) {
2771 			doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote), retry it!\n",
2772 			      inode, ceph_vinop(inode), err,
2773 			      ceph_cap_string(dirtied), mask);
2774 			ceph_mdsc_put_request(req);
2775 			ceph_free_cap_flush(prealloc_cf);
2776 			goto retry;
2777 		}
2778 	}
2779 out:
2780 	doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote)\n", inode,
2781 	      ceph_vinop(inode), err, ceph_cap_string(dirtied), mask);
2782 
2783 	ceph_mdsc_put_request(req);
2784 	ceph_free_cap_flush(prealloc_cf);
2785 
2786 	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2787 		__ceph_do_pending_vmtruncate(inode);
2788 
2789 	return err;
2790 }
2791 
2792 /*
2793  * setattr
2794  */
2795 int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
2796 		 struct iattr *attr)
2797 {
2798 	struct inode *inode = d_inode(dentry);
2799 	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2800 	int err;
2801 
2802 	if (ceph_snap(inode) != CEPH_NOSNAP)
2803 		return -EROFS;
2804 
2805 	if (ceph_inode_is_shutdown(inode))
2806 		return -ESTALE;
2807 
2808 	err = fscrypt_prepare_setattr(dentry, attr);
2809 	if (err)
2810 		return err;
2811 
2812 	err = setattr_prepare(idmap, dentry, attr);
2813 	if (err != 0)
2814 		return err;
2815 
2816 	if ((attr->ia_valid & ATTR_SIZE) &&
2817 	    attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
2818 		return -EFBIG;
2819 
2820 	if ((attr->ia_valid & ATTR_SIZE) &&
2821 	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2822 		return -EDQUOT;
2823 
2824 	err = __ceph_setattr(idmap, inode, attr, NULL);
2825 
2826 	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2827 		err = posix_acl_chmod(idmap, dentry, attr->ia_mode);
2828 
2829 	return err;
2830 }
2831 
2832 int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
2833 {
2834 	int issued = ceph_caps_issued(ceph_inode(inode));
2835 
2836 	/*
2837 	 * If any 'x' caps are issued we can just choose the auth MDS
2838 	 * instead of a random replica MDS: only when the Locker is in
2839 	 * the LOCK_EXEC state can the loner client be granted 'x' caps.
2840 	 *
2841 	 * If we send a getattr request to a replica MDS instead, it
2842 	 * must auth pin and try to rdlock from the auth MDS, which
2843 	 * forces the auth MDS to transition the Locker state to
2844 	 * LOCK_SYNC, after which the lock state changes back again.
2845 	 *
2846 	 * These Locker state transitions are expensive and usually
2847 	 * also require revoking caps from clients.
2848 	 *
2849 	 * For the 'Xs' caps needed by getxattr we also choose the
2850 	 * auth MDS, because the MDS side code is buggy: setxattr
2851 	 * doesn't notify the replica MDSes when values change, so a
2852 	 * replica may return stale values. This will be fixed in the
2853 	 * MDS code, but the workaround still makes sense for old ceph.
2854 	 */
2855 	if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
2856 	    || (mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR)))
2857 		return USE_AUTH_MDS;
2858 	else
2859 		return USE_ANY_MDS;
2860 }
2861 
2862 /*
2863  * Verify that we have a lease on the given mask.  If not,
2864  * do a getattr against an mds.
2865  */
2866 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2867 		      int mask, bool force)
2868 {
2869 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
2870 	struct ceph_client *cl = fsc->client;
2871 	struct ceph_mds_client *mdsc = fsc->mdsc;
2872 	struct ceph_mds_request *req;
2873 	int mode;
2874 	int err;
2875 
2876 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
2877 		doutc(cl, "inode %p %llx.%llx SNAPDIR\n", inode,
2878 		      ceph_vinop(inode));
2879 		return 0;
2880 	}
2881 
2882 	doutc(cl, "inode %p %llx.%llx mask %s mode 0%o\n", inode,
2883 	      ceph_vinop(inode), ceph_cap_string(mask), inode->i_mode);
2884 	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2885 		return 0;
2886 
2887 	mode = ceph_try_to_choose_auth_mds(inode, mask);
2888 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2889 	if (IS_ERR(req))
2890 		return PTR_ERR(req);
2891 	req->r_inode = inode;
2892 	ihold(inode);
2893 	req->r_num_caps = 1;
2894 	req->r_args.getattr.mask = cpu_to_le32(mask);
2895 	req->r_locked_page = locked_page;
2896 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2897 	if (locked_page && err == 0) {
2898 		u64 inline_version = req->r_reply_info.targeti.inline_version;
2899 		if (inline_version == 0) {
2900 			/* the reply is supposed to contain inline data */
2901 			err = -EINVAL;
2902 		} else if (inline_version == CEPH_INLINE_NONE ||
2903 			   inline_version == 1) {
2904 			err = -ENODATA;
2905 		} else {
2906 			err = req->r_reply_info.targeti.inline_len;
2907 		}
2908 	}
2909 	ceph_mdsc_put_request(req);
2910 	doutc(cl, "result=%d\n", err);
2911 	return err;
2912 }
2913 
2914 int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
2915 		      size_t size)
2916 {
2917 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
2918 	struct ceph_client *cl = fsc->client;
2919 	struct ceph_mds_client *mdsc = fsc->mdsc;
2920 	struct ceph_mds_request *req;
2921 	int mode = USE_AUTH_MDS;
2922 	int err;
2923 	char *xattr_value;
2924 	size_t xattr_value_len;
2925 
2926 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode);
2927 	if (IS_ERR(req)) {
2928 		err = PTR_ERR(req);
2929 		goto out;
2930 	}
2931 
2932 	req->r_feature_needed = CEPHFS_FEATURE_OP_GETVXATTR;
2933 	req->r_path2 = kstrdup(name, GFP_NOFS);
2934 	if (!req->r_path2) {
2935 		err = -ENOMEM;
2936 		goto put;
2937 	}
2938 
2939 	ihold(inode);
2940 	req->r_inode = inode;
2941 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2942 	if (err < 0)
2943 		goto put;
2944 
2945 	xattr_value = req->r_reply_info.xattr_info.xattr_value;
2946 	xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
2947 
2948 	doutc(cl, "xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
2949 
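	/*
	 * As with getxattr(2), a zero size is a probe: report the value
	 * length without copying anything.
	 */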
2950 	err = (int)xattr_value_len;
2951 	if (size == 0)
2952 		goto put;
2953 
2954 	if (xattr_value_len > size) {
2955 		err = -ERANGE;
2956 		goto put;
2957 	}
2958 
2959 	memcpy(value, xattr_value, xattr_value_len);
2960 put:
2961 	ceph_mdsc_put_request(req);
2962 out:
2963 	doutc(cl, "result=%d\n", err);
2964 	return err;
2965 }
2966 
2968 /*
2969  * Check inode permissions.  We verify we have a valid value for
2970  * the AUTH cap, then call the generic handler.
2971  */
2972 int ceph_permission(struct mnt_idmap *idmap, struct inode *inode,
2973 		    int mask)
2974 {
2975 	int err;
2976 
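	/*
	 * In RCU-walk mode we may need to talk to the MDS, so bail and
	 * let the VFS retry in ref-walk mode.
	 */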
2977 	if (mask & MAY_NOT_BLOCK)
2978 		return -ECHILD;
2979 
2980 	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2981 
2982 	if (!err)
2983 		err = generic_permission(idmap, inode, mask);
2984 	return err;
2985 }
2986 
2987 /* Craft a mask of needed caps given a set of requested statx attrs. */
2988 static int statx_to_caps(u32 want, umode_t mode)
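/*
 * For example (illustrative): a statx() asking only for STATX_SIZE on a
 * regular file needs just CEPH_CAP_FILE_SHARED, while STATX_CTIME pulls
 * in the AUTH, LINK, FILE and XATTR shared caps, since a ctime bump can
 * originate in any of those subsystems.
 */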
2989 {
2990 	int mask = 0;
2991 
2992 	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME|STATX_CHANGE_COOKIE))
2993 		mask |= CEPH_CAP_AUTH_SHARED;
2994 
2995 	if (want & (STATX_NLINK|STATX_CTIME|STATX_CHANGE_COOKIE)) {
2996 		/*
2997 		 * The link count for directories depends on inode->i_subdirs,
2998 		 * and that is only updated when Fs caps are held.
2999 		 */
3000 		if (S_ISDIR(mode))
3001 			mask |= CEPH_CAP_FILE_SHARED;
3002 		else
3003 			mask |= CEPH_CAP_LINK_SHARED;
3004 	}
3005 
3006 	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|STATX_BLOCKS|STATX_CHANGE_COOKIE))
3007 		mask |= CEPH_CAP_FILE_SHARED;
3008 
3009 	if (want & (STATX_CTIME|STATX_CHANGE_COOKIE))
3010 		mask |= CEPH_CAP_XATTR_SHARED;
3011 
3012 	return mask;
3013 }
3014 
3015 /*
3016  * Get all the attributes. If we have sufficient caps for the requested attrs,
3017  * then we can avoid talking to the MDS at all.
3018  */
3019 int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
3020 		 struct kstat *stat, u32 request_mask, unsigned int flags)
3021 {
3022 	struct inode *inode = d_inode(path->dentry);
3023 	struct super_block *sb = inode->i_sb;
3024 	struct ceph_inode_info *ci = ceph_inode(inode);
3025 	u32 valid_mask = STATX_BASIC_STATS;
3026 	int err = 0;
3027 
3028 	if (ceph_inode_is_shutdown(inode))
3029 		return -ESTALE;
3030 
3031 	/* Skip the getattr altogether if we're asked not to sync */
3032 	if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
3033 		err = ceph_do_getattr(inode,
3034 				statx_to_caps(request_mask, inode->i_mode),
3035 				flags & AT_STATX_FORCE_SYNC);
3036 		if (err)
3037 			return err;
3038 	}
3039 
3040 	generic_fillattr(idmap, request_mask, inode, stat);
3041 	stat->ino = ceph_present_inode(inode);
3042 
3043 	/*
3044 	 * btime on newly-allocated inodes is 0, so if this is still set to
3045 	 * that, then assume that it's not valid.
3046 	 */
3047 	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
3048 		stat->btime = ci->i_btime;
3049 		valid_mask |= STATX_BTIME;
3050 	}
3051 
3052 	if (request_mask & STATX_CHANGE_COOKIE) {
3053 		stat->change_cookie = inode_peek_iversion_raw(inode);
3054 		valid_mask |= STATX_CHANGE_COOKIE;
3055 	}
3056 
3057 	if (ceph_snap(inode) == CEPH_NOSNAP)
3058 		stat->dev = sb->s_dev;
3059 	else
3060 		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
3061 
3062 	if (S_ISDIR(inode->i_mode)) {
3063 		if (ceph_test_mount_opt(ceph_sb_to_fs_client(sb), RBYTES)) {
3064 			stat->size = ci->i_rbytes;
3065 		} else if (ceph_snap(inode) == CEPH_SNAPDIR) {
3066 			struct ceph_inode_info *pci;
3067 			struct ceph_snap_realm *realm;
3068 			struct inode *parent;
3069 
3070 			parent = ceph_lookup_inode(sb, ceph_ino(inode));
3071 			if (IS_ERR(parent))
3072 				return PTR_ERR(parent);
3073 
3074 			pci = ceph_inode(parent);
3075 			spin_lock(&pci->i_ceph_lock);
3076 			realm = pci->i_snap_realm;
3077 			if (realm)
3078 				stat->size = realm->num_snaps;
3079 			else
3080 				stat->size = 0;
3081 			spin_unlock(&pci->i_ceph_lock);
3082 			iput(parent);
3083 		} else {
3084 			stat->size = ci->i_files + ci->i_subdirs;
3085 		}
3086 		stat->blocks = 0;
3087 		stat->blksize = 65536;
3088 		/*
3089 		 * Some applications rely on the st_nlink value of
3090 		 * directories being either 0 (if unlinked)
3091 		 * or 2 + number of subdirectories.
3092 		 */
3093 		if (stat->nlink == 1)
3094 			/* '.' + '..' + subdirs */
3095 			stat->nlink = 1 + 1 + ci->i_subdirs;
3096 	}
3097 
3098 	stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
3099 	if (IS_ENCRYPTED(inode))
3100 		stat->attributes |= STATX_ATTR_ENCRYPTED;
3101 	stat->attributes_mask |= (STATX_ATTR_CHANGE_MONOTONIC |
3102 				  STATX_ATTR_ENCRYPTED);
3103 
3104 	stat->result_mask = request_mask & valid_mask;
3105 	return err;
3106 }
3107 
3108 void ceph_inode_shutdown(struct inode *inode)
3109 {
3110 	struct ceph_inode_info *ci = ceph_inode(inode);
3111 	struct rb_node *p;
3112 	int iputs = 0;
3113 	bool invalidate = false;
3114 
3115 	spin_lock(&ci->i_ceph_lock);
3116 	ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
3117 	p = rb_first(&ci->i_caps);
3118 	while (p) {
3119 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
3120 
3121 		p = rb_next(p);
3122 		iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
3123 	}
3124 	spin_unlock(&ci->i_ceph_lock);
3125 
3126 	if (invalidate)
3127 		ceph_queue_invalidate(inode);
3128 	while (iputs--)
3129 		iput(inode);
3130 }
3131