xref: /linux/fs/ceph/inode.c (revision 56ef27e3abe6d6453b1f4f6127041f3a65d7cbc9)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/module.h>
5 #include <linux/fs.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
17 #include <linux/fscrypt.h>
18 
19 #include "super.h"
20 #include "mds_client.h"
21 #include "cache.h"
22 #include "crypto.h"
23 #include <linux/ceph/decode.h>
24 
25 /*
26  * Ceph inode operations
27  *
28  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
29  * setattr, etc.), xattr helpers, and helpers for assimilating
30  * metadata returned by the MDS into our cache.
31  *
32  * Also define helpers for doing asynchronous writeback, invalidation,
33  * and truncation for the benefit of those who can't afford to block
34  * (typically because they are in the message handler path).
35  */
36 
37 static const struct inode_operations ceph_symlink_iops;
38 static const struct inode_operations ceph_encrypted_symlink_iops;
39 
40 static void ceph_inode_work(struct work_struct *work);
41 
42 /*
43  * find or create an inode, given the ceph ino number
44  */
45 static int ceph_set_ino_cb(struct inode *inode, void *data)
46 {
47 	struct ceph_inode_info *ci = ceph_inode(inode);
48 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
49 
50 	ci->i_vino = *(struct ceph_vino *)data;
51 	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
52 	inode_set_iversion_raw(inode, 0);
53 	percpu_counter_inc(&mdsc->metric.total_inodes);
54 
55 	return 0;
56 }
57 
58 /**
59  * ceph_new_inode - allocate a new inode in advance of an expected create
60  * @dir: parent directory for new inode
61  * @dentry: dentry that may eventually point to new inode
62  * @mode: mode of new inode
63  * @as_ctx: pointer to inherited security context
64  *
65  * Allocate a new inode in advance of an operation to create a new inode.
66  * This allocates the inode and sets up the acl_sec_ctx with appropriate
67  * info for the new inode.
68  *
69  * Returns a pointer to the new inode or an ERR_PTR.
70  */
71 struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
72 			     umode_t *mode, struct ceph_acl_sec_ctx *as_ctx)
73 {
74 	int err;
75 	struct inode *inode;
76 
77 	inode = new_inode(dir->i_sb);
78 	if (!inode)
79 		return ERR_PTR(-ENOMEM);
80 
81 	inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
82 
83 	if (!S_ISLNK(*mode)) {
84 		err = ceph_pre_init_acls(dir, mode, as_ctx);
85 		if (err < 0)
86 			goto out_err;
87 	}
88 
89 	inode->i_state = 0;
90 	inode->i_mode = *mode;
91 
92 	err = ceph_security_init_secctx(dentry, *mode, as_ctx);
93 	if (err < 0)
94 		goto out_err;
95 
96 	/*
97 	 * We'll skip setting the fscrypt context for snapshots, leaving
98 	 * that for handle_reply().
99 	 */
100 	if (ceph_snap(dir) != CEPH_SNAPDIR) {
101 		err = ceph_fscrypt_prepare_context(dir, inode, as_ctx);
102 		if (err)
103 			goto out_err;
104 	}
105 
106 	return inode;
107 out_err:
108 	iput(inode);
109 	return ERR_PTR(err);
110 }
111 
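/*
 * Transfer the security/ACL xattr pagelist prepared in the as_ctx into the
 * MDS request so it is sent along with the create, then hand off any
 * prepared fscrypt context the same way.
 */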
112 void ceph_as_ctx_to_req(struct ceph_mds_request *req,
113 			struct ceph_acl_sec_ctx *as_ctx)
114 {
115 	if (as_ctx->pagelist) {
116 		req->r_pagelist = as_ctx->pagelist;
117 		as_ctx->pagelist = NULL;
118 	}
119 	ceph_fscrypt_as_ctx_to_req(req, as_ctx);
120 }
121 
122 /**
123  * ceph_get_inode - find or create/hash a new inode
124  * @sb: superblock to search and allocate in
125  * @vino: vino to search for
126  * @newino: optional new inode to insert if one isn't found (may be NULL)
127  *
128  * Search for or insert a new inode into the hash for the given vino, and
129  * return a reference to it. If newino is non-NULL, its reference is consumed.
130  */
131 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
132 			     struct inode *newino)
133 {
134 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
135 	struct ceph_client *cl = mdsc->fsc->client;
136 	struct inode *inode;
137 
138 	if (ceph_vino_is_reserved(vino))
139 		return ERR_PTR(-EREMOTEIO);
140 
141 	if (newino) {
142 		inode = inode_insert5(newino, (unsigned long)vino.ino,
143 				      ceph_ino_compare, ceph_set_ino_cb, &vino);
144 		if (inode != newino)
145 			iput(newino);
146 	} else {
147 		inode = iget5_locked(sb, (unsigned long)vino.ino,
148 				     ceph_ino_compare, ceph_set_ino_cb, &vino);
149 	}
150 
151 	if (!inode) {
152 		doutc(cl, "no inode found for %llx.%llx\n", vino.ino, vino.snap);
153 		return ERR_PTR(-ENOMEM);
154 	}
155 
156 	doutc(cl, "on %llx=%llx.%llx got %p new %d\n",
157 	      ceph_present_inode(inode), ceph_vinop(inode), inode,
158 	      !!(inode->i_state & I_NEW));
159 	return inode;
160 }
161 
162 /*
163  * get/construct snapdir inode for a given directory
164  */
165 struct inode *ceph_get_snapdir(struct inode *parent)
166 {
167 	struct ceph_client *cl = ceph_inode_to_client(parent);
168 	struct ceph_vino vino = {
169 		.ino = ceph_ino(parent),
170 		.snap = CEPH_SNAPDIR,
171 	};
172 	struct inode *inode = ceph_get_inode(parent->i_sb, vino, NULL);
173 	struct ceph_inode_info *ci = ceph_inode(inode);
174 	int ret = -ENOTDIR;
175 
176 	if (IS_ERR(inode))
177 		return inode;
178 
179 	if (!S_ISDIR(parent->i_mode)) {
180 		pr_warn_once_client(cl, "bad snapdir parent type (mode=0%o)\n",
181 				    parent->i_mode);
182 		goto err;
183 	}
184 
185 	if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
186 		pr_warn_once_client(cl, "bad snapdir inode type (mode=0%o)\n",
187 				    inode->i_mode);
188 		goto err;
189 	}
190 
191 	inode->i_mode = parent->i_mode;
192 	inode->i_uid = parent->i_uid;
193 	inode->i_gid = parent->i_gid;
194 	inode_set_mtime_to_ts(inode, inode_get_mtime(parent));
195 	inode_set_ctime_to_ts(inode, inode_get_ctime(parent));
196 	inode_set_atime_to_ts(inode, inode_get_atime(parent));
197 	ci->i_rbytes = 0;
198 	ci->i_btime = ceph_inode(parent)->i_btime;
199 
200 #ifdef CONFIG_FS_ENCRYPTION
201 	/* if encrypted, just borrow fscrypt_auth from parent */
202 	if (IS_ENCRYPTED(parent)) {
203 		struct ceph_inode_info *pci = ceph_inode(parent);
204 
205 		ci->fscrypt_auth = kmemdup(pci->fscrypt_auth,
206 					   pci->fscrypt_auth_len,
207 					   GFP_KERNEL);
208 		if (ci->fscrypt_auth) {
209 			inode->i_flags |= S_ENCRYPTED;
210 			ci->fscrypt_auth_len = pci->fscrypt_auth_len;
211 		} else {
212 			doutc(cl, "Failed to alloc snapdir fscrypt_auth\n");
213 			ret = -ENOMEM;
214 			goto err;
215 		}
216 	}
217 #endif
218 	if (inode->i_state & I_NEW) {
219 		inode->i_op = &ceph_snapdir_iops;
220 		inode->i_fop = &ceph_snapdir_fops;
221 		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
222 		unlock_new_inode(inode);
223 	}
224 
225 	return inode;
226 err:
227 	if ((inode->i_state & I_NEW))
228 		discard_new_inode(inode);
229 	else
230 		iput(inode);
231 	return ERR_PTR(ret);
232 }
233 
234 const struct inode_operations ceph_file_iops = {
235 	.permission = ceph_permission,
236 	.setattr = ceph_setattr,
237 	.getattr = ceph_getattr,
238 	.listxattr = ceph_listxattr,
239 	.get_inode_acl = ceph_get_acl,
240 	.set_acl = ceph_set_acl,
241 };
242 
243 
244 /*
245  * We use a 'frag tree' to keep track of the MDS's directory fragments
246  * for a given inode (usually there is just a single fragment).  We
247  * need to know when a child frag is delegated to a new MDS, or when
248  * it is flagged as replicated, so we can direct our requests
249  * accordingly.
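 *
 * A frag id encodes a (bits, value) pair identifying one fragment of the
 * directory's hash space: roughly, the number of significant bits in the
 * high byte and the masked hash value in the low 24 bits (see
 * ceph_frag_make() and friends in include/linux/ceph/ceph_frag.h).
 * ceph_frag_make(0, 0) is the whole directory; splitting it by one bit
 * yields two children covering the lower and upper halves of the range.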
250  */
251 
252 /*
253  * find/create a frag in the tree
254  */
255 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
256 						    u32 f)
257 {
258 	struct inode *inode = &ci->netfs.inode;
259 	struct ceph_client *cl = ceph_inode_to_client(inode);
260 	struct rb_node **p;
261 	struct rb_node *parent = NULL;
262 	struct ceph_inode_frag *frag;
263 	int c;
264 
265 	p = &ci->i_fragtree.rb_node;
266 	while (*p) {
267 		parent = *p;
268 		frag = rb_entry(parent, struct ceph_inode_frag, node);
269 		c = ceph_frag_compare(f, frag->frag);
270 		if (c < 0)
271 			p = &(*p)->rb_left;
272 		else if (c > 0)
273 			p = &(*p)->rb_right;
274 		else
275 			return frag;
276 	}
277 
278 	frag = kmalloc(sizeof(*frag), GFP_NOFS);
279 	if (!frag)
280 		return ERR_PTR(-ENOMEM);
281 
282 	frag->frag = f;
283 	frag->split_by = 0;
284 	frag->mds = -1;
285 	frag->ndist = 0;
286 
287 	rb_link_node(&frag->node, parent, p);
288 	rb_insert_color(&frag->node, &ci->i_fragtree);
289 
290 	doutc(cl, "added %p %llx.%llx frag %x\n", inode, ceph_vinop(inode), f);
291 	return frag;
292 }
293 
294 /*
295  * find a specific frag @f
296  */
297 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
298 {
299 	struct rb_node *n = ci->i_fragtree.rb_node;
300 
301 	while (n) {
302 		struct ceph_inode_frag *frag =
303 			rb_entry(n, struct ceph_inode_frag, node);
304 		int c = ceph_frag_compare(f, frag->frag);
305 		if (c < 0)
306 			n = n->rb_left;
307 		else if (c > 0)
308 			n = n->rb_right;
309 		else
310 			return frag;
311 	}
312 	return NULL;
313 }
314 
315 /*
316  * Choose frag containing the given value @v.  If @pfrag is
317  * specified, copy the frag delegation info to the caller if
318  * it is present.
319  */
320 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
321 			      struct ceph_inode_frag *pfrag, int *found)
322 {
323 	struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
324 	u32 t = ceph_frag_make(0, 0);
325 	struct ceph_inode_frag *frag;
326 	unsigned nway, i;
327 	u32 n;
328 
329 	if (found)
330 		*found = 0;
331 
332 	while (1) {
333 		WARN_ON(!ceph_frag_contains_value(t, v));
334 		frag = __ceph_find_frag(ci, t);
335 		if (!frag)
336 			break; /* t is a leaf */
337 		if (frag->split_by == 0) {
338 			if (pfrag)
339 				memcpy(pfrag, frag, sizeof(*pfrag));
340 			if (found)
341 				*found = 1;
342 			break;
343 		}
344 
345 		/* choose child */
346 		nway = 1 << frag->split_by;
347 		doutc(cl, "frag(%x) %x splits by %d (%d ways)\n", v, t,
348 		      frag->split_by, nway);
349 		for (i = 0; i < nway; i++) {
350 			n = ceph_frag_make_child(t, frag->split_by, i);
351 			if (ceph_frag_contains_value(n, v)) {
352 				t = n;
353 				break;
354 			}
355 		}
356 		BUG_ON(i == nway);
357 	}
358 	doutc(cl, "frag(%x) = %x\n", v, t);
359 
360 	return t;
361 }
362 
363 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
364 		     struct ceph_inode_frag *pfrag, int *found)
365 {
366 	u32 ret;
367 	mutex_lock(&ci->i_fragtree_mutex);
368 	ret = __ceph_choose_frag(ci, v, pfrag, found);
369 	mutex_unlock(&ci->i_fragtree_mutex);
370 	return ret;
371 }
372 
373 /*
374  * Process dirfrag (delegation) info from the mds.  Include leaf
375  * fragment in tree ONLY if ndist > 0.  Otherwise, only
376  * branches/splits are included in i_fragtree.
377  */
378 static int ceph_fill_dirfrag(struct inode *inode,
379 			     struct ceph_mds_reply_dirfrag *dirinfo)
380 {
381 	struct ceph_inode_info *ci = ceph_inode(inode);
382 	struct ceph_client *cl = ceph_inode_to_client(inode);
383 	struct ceph_inode_frag *frag;
384 	u32 id = le32_to_cpu(dirinfo->frag);
385 	int mds = le32_to_cpu(dirinfo->auth);
386 	int ndist = le32_to_cpu(dirinfo->ndist);
387 	int diri_auth = -1;
388 	int i;
389 	int err = 0;
390 
391 	spin_lock(&ci->i_ceph_lock);
392 	if (ci->i_auth_cap)
393 		diri_auth = ci->i_auth_cap->mds;
394 	spin_unlock(&ci->i_ceph_lock);
395 
396 	if (mds == -1) /* CDIR_AUTH_PARENT */
397 		mds = diri_auth;
398 
399 	mutex_lock(&ci->i_fragtree_mutex);
400 	if (ndist == 0 && mds == diri_auth) {
401 		/* no delegation info needed. */
402 		frag = __ceph_find_frag(ci, id);
403 		if (!frag)
404 			goto out;
405 		if (frag->split_by == 0) {
406 			/* tree leaf, remove */
407 			doutc(cl, "removed %p %llx.%llx frag %x (no ref)\n",
408 			      inode, ceph_vinop(inode), id);
409 			rb_erase(&frag->node, &ci->i_fragtree);
410 			kfree(frag);
411 		} else {
412 			/* tree branch, keep and clear */
413 			doutc(cl, "cleared %p %llx.%llx frag %x referral\n",
414 			      inode, ceph_vinop(inode), id);
415 			frag->mds = -1;
416 			frag->ndist = 0;
417 		}
418 		goto out;
419 	}
420 
421 
422 	/* find/add this frag to store mds delegation info */
423 	frag = __get_or_create_frag(ci, id);
424 	if (IS_ERR(frag)) {
425 		/* this is not the end of the world; we can continue
426 		   with bad/inaccurate delegation info */
427 		pr_err_client(cl, "ENOMEM on mds ref %p %llx.%llx fg %x\n",
428 			      inode, ceph_vinop(inode),
429 			      le32_to_cpu(dirinfo->frag));
430 		err = -ENOMEM;
431 		goto out;
432 	}
433 
434 	frag->mds = mds;
435 	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
436 	for (i = 0; i < frag->ndist; i++)
437 		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
438 	doutc(cl, "%p %llx.%llx frag %x ndist=%d\n", inode,
439 	      ceph_vinop(inode), frag->frag, frag->ndist);
440 
441 out:
442 	mutex_unlock(&ci->i_fragtree_mutex);
443 	return err;
444 }
445 
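/*
 * Comparator for sort(): order fragtree split records by frag id so that a
 * parent fragment sorts before its children when the tree is rebuilt in
 * ceph_fill_fragtree().
 */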
446 static int frag_tree_split_cmp(const void *l, const void *r)
447 {
448 	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
449 	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
450 	return ceph_frag_compare(le32_to_cpu(ls->frag),
451 				 le32_to_cpu(rs->frag));
452 }
453 
454 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
455 {
456 	if (!frag)
457 		return f == ceph_frag_make(0, 0);
458 	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
459 		return false;
460 	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
461 }
462 
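/*
 * Rebuild the in-memory fragtree from the fragtree sent by the MDS,
 * pruning stale split/leaf nodes and keeping i_fragtree_nsplits in sync.
 * @dirinfo, if provided, is only used to detect whether our cached tree
 * is stale.
 */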
463 static int ceph_fill_fragtree(struct inode *inode,
464 			      struct ceph_frag_tree_head *fragtree,
465 			      struct ceph_mds_reply_dirfrag *dirinfo)
466 {
467 	struct ceph_client *cl = ceph_inode_to_client(inode);
468 	struct ceph_inode_info *ci = ceph_inode(inode);
469 	struct ceph_inode_frag *frag, *prev_frag = NULL;
470 	struct rb_node *rb_node;
471 	unsigned i, split_by, nsplits;
472 	u32 id;
473 	bool update = false;
474 
475 	mutex_lock(&ci->i_fragtree_mutex);
476 	nsplits = le32_to_cpu(fragtree->nsplits);
477 	if (nsplits != ci->i_fragtree_nsplits) {
478 		update = true;
479 	} else if (nsplits) {
480 		i = get_random_u32_below(nsplits);
481 		id = le32_to_cpu(fragtree->splits[i].frag);
482 		if (!__ceph_find_frag(ci, id))
483 			update = true;
484 	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
485 		rb_node = rb_first(&ci->i_fragtree);
486 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
487 		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
488 			update = true;
489 	}
490 	if (!update && dirinfo) {
491 		id = le32_to_cpu(dirinfo->frag);
492 		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
493 			update = true;
494 	}
495 	if (!update)
496 		goto out_unlock;
497 
498 	if (nsplits > 1) {
499 		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
500 		     frag_tree_split_cmp, NULL);
501 	}
502 
503 	doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
504 	rb_node = rb_first(&ci->i_fragtree);
505 	for (i = 0; i < nsplits; i++) {
506 		id = le32_to_cpu(fragtree->splits[i].frag);
507 		split_by = le32_to_cpu(fragtree->splits[i].by);
508 		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
509 			pr_err_client(cl, "%p %llx.%llx invalid split %d/%u, "
510 			       "frag %x split by %d\n", inode,
511 			       ceph_vinop(inode), i, nsplits, id, split_by);
512 			continue;
513 		}
514 		frag = NULL;
515 		while (rb_node) {
516 			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
517 			if (ceph_frag_compare(frag->frag, id) >= 0) {
518 				if (frag->frag != id)
519 					frag = NULL;
520 				else
521 					rb_node = rb_next(rb_node);
522 				break;
523 			}
524 			rb_node = rb_next(rb_node);
525 			/* delete stale split/leaf node */
526 			if (frag->split_by > 0 ||
527 			    !is_frag_child(frag->frag, prev_frag)) {
528 				rb_erase(&frag->node, &ci->i_fragtree);
529 				if (frag->split_by > 0)
530 					ci->i_fragtree_nsplits--;
531 				kfree(frag);
532 			}
533 			frag = NULL;
534 		}
535 		if (!frag) {
536 			frag = __get_or_create_frag(ci, id);
537 			if (IS_ERR(frag))
538 				continue;
539 		}
540 		if (frag->split_by == 0)
541 			ci->i_fragtree_nsplits++;
542 		frag->split_by = split_by;
543 		doutc(cl, " frag %x split by %d\n", frag->frag, frag->split_by);
544 		prev_frag = frag;
545 	}
546 	while (rb_node) {
547 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
548 		rb_node = rb_next(rb_node);
549 		/* delete stale split/leaf node */
550 		if (frag->split_by > 0 ||
551 		    !is_frag_child(frag->frag, prev_frag)) {
552 			rb_erase(&frag->node, &ci->i_fragtree);
553 			if (frag->split_by > 0)
554 				ci->i_fragtree_nsplits--;
555 			kfree(frag);
556 		}
557 	}
558 out_unlock:
559 	mutex_unlock(&ci->i_fragtree_mutex);
560 	return 0;
561 }
562 
563 /*
564  * initialize a newly allocated inode.
565  */
566 struct inode *ceph_alloc_inode(struct super_block *sb)
567 {
568 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
569 	struct ceph_inode_info *ci;
570 	int i;
571 
572 	ci = alloc_inode_sb(sb, ceph_inode_cachep, GFP_NOFS);
573 	if (!ci)
574 		return NULL;
575 
576 	doutc(fsc->client, "%p\n", &ci->netfs.inode);
577 
578 	/* Set parameters for the netfs library */
579 	netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
580 
581 	spin_lock_init(&ci->i_ceph_lock);
582 
583 	ci->i_version = 0;
584 	ci->i_inline_version = 0;
585 	ci->i_time_warp_seq = 0;
586 	ci->i_ceph_flags = 0;
587 	atomic64_set(&ci->i_ordered_count, 1);
588 	atomic64_set(&ci->i_release_count, 1);
589 	atomic64_set(&ci->i_complete_seq[0], 0);
590 	atomic64_set(&ci->i_complete_seq[1], 0);
591 	ci->i_symlink = NULL;
592 
593 	ci->i_max_bytes = 0;
594 	ci->i_max_files = 0;
595 
596 	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
597 	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
598 	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
599 
600 	ci->i_fragtree = RB_ROOT;
601 	mutex_init(&ci->i_fragtree_mutex);
602 
603 	ci->i_xattrs.blob = NULL;
604 	ci->i_xattrs.prealloc_blob = NULL;
605 	ci->i_xattrs.dirty = false;
606 	ci->i_xattrs.index = RB_ROOT;
607 	ci->i_xattrs.count = 0;
608 	ci->i_xattrs.names_size = 0;
609 	ci->i_xattrs.vals_size = 0;
610 	ci->i_xattrs.version = 0;
611 	ci->i_xattrs.index_version = 0;
612 
613 	ci->i_caps = RB_ROOT;
614 	ci->i_auth_cap = NULL;
615 	ci->i_dirty_caps = 0;
616 	ci->i_flushing_caps = 0;
617 	INIT_LIST_HEAD(&ci->i_dirty_item);
618 	INIT_LIST_HEAD(&ci->i_flushing_item);
619 	ci->i_prealloc_cap_flush = NULL;
620 	INIT_LIST_HEAD(&ci->i_cap_flush_list);
621 	init_waitqueue_head(&ci->i_cap_wq);
622 	ci->i_hold_caps_max = 0;
623 	INIT_LIST_HEAD(&ci->i_cap_delay_list);
624 	INIT_LIST_HEAD(&ci->i_cap_snaps);
625 	ci->i_head_snapc = NULL;
626 	ci->i_snap_caps = 0;
627 
628 	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
629 	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
630 		ci->i_nr_by_mode[i] = 0;
631 
632 	mutex_init(&ci->i_truncate_mutex);
633 	ci->i_truncate_seq = 0;
634 	ci->i_truncate_size = 0;
635 	ci->i_truncate_pending = 0;
636 	ci->i_truncate_pagecache_size = 0;
637 
638 	ci->i_max_size = 0;
639 	ci->i_reported_size = 0;
640 	ci->i_wanted_max_size = 0;
641 	ci->i_requested_max_size = 0;
642 
643 	ci->i_pin_ref = 0;
644 	ci->i_rd_ref = 0;
645 	ci->i_rdcache_ref = 0;
646 	ci->i_wr_ref = 0;
647 	ci->i_wb_ref = 0;
648 	ci->i_fx_ref = 0;
649 	ci->i_wrbuffer_ref = 0;
650 	ci->i_wrbuffer_ref_head = 0;
651 	atomic_set(&ci->i_filelock_ref, 0);
652 	atomic_set(&ci->i_shared_gen, 1);
653 	ci->i_rdcache_gen = 0;
654 	ci->i_rdcache_revoking = 0;
655 
656 	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
657 	INIT_LIST_HEAD(&ci->i_unsafe_iops);
658 	spin_lock_init(&ci->i_unsafe_lock);
659 
660 	ci->i_snap_realm = NULL;
661 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
662 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
663 
664 	INIT_WORK(&ci->i_work, ceph_inode_work);
665 	ci->i_work_mask = 0;
666 	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
667 #ifdef CONFIG_FS_ENCRYPTION
668 	ci->fscrypt_auth = NULL;
669 	ci->fscrypt_auth_len = 0;
670 #endif
671 	return &ci->netfs.inode;
672 }
673 
674 void ceph_free_inode(struct inode *inode)
675 {
676 	struct ceph_inode_info *ci = ceph_inode(inode);
677 
678 	kfree(ci->i_symlink);
679 #ifdef CONFIG_FS_ENCRYPTION
680 	kfree(ci->fscrypt_auth);
681 #endif
682 	fscrypt_free_inode(inode);
683 	kmem_cache_free(ceph_inode_cachep, ci);
684 }
685 
686 void ceph_evict_inode(struct inode *inode)
687 {
688 	struct ceph_inode_info *ci = ceph_inode(inode);
689 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
690 	struct ceph_client *cl = ceph_inode_to_client(inode);
691 	struct ceph_inode_frag *frag;
692 	struct rb_node *n;
693 
694 	doutc(cl, "%p ino %llx.%llx\n", inode, ceph_vinop(inode));
695 
696 	percpu_counter_dec(&mdsc->metric.total_inodes);
697 
698 	truncate_inode_pages_final(&inode->i_data);
699 	if (inode->i_state & I_PINNING_NETFS_WB)
700 		ceph_fscache_unuse_cookie(inode, true);
701 	clear_inode(inode);
702 
703 	ceph_fscache_unregister_inode_cookie(ci);
704 	fscrypt_put_encryption_info(inode);
705 
706 	__ceph_remove_caps(ci);
707 
708 	if (__ceph_has_quota(ci, QUOTA_GET_ANY))
709 		ceph_adjust_quota_realms_count(inode, false);
710 
711 	/*
712 	 * we may still have a snap_realm reference if there are stray
713 	 * caps in i_snap_caps.
714 	 */
715 	if (ci->i_snap_realm) {
716 		if (ceph_snap(inode) == CEPH_NOSNAP) {
717 			doutc(cl, " dropping residual ref to snap realm %p\n",
718 			      ci->i_snap_realm);
719 			ceph_change_snap_realm(inode, NULL);
720 		} else {
721 			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
722 			ci->i_snap_realm = NULL;
723 		}
724 	}
725 
726 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
727 		frag = rb_entry(n, struct ceph_inode_frag, node);
728 		rb_erase(n, &ci->i_fragtree);
729 		kfree(frag);
730 	}
731 	ci->i_fragtree_nsplits = 0;
732 
733 	__ceph_destroy_xattrs(ci);
734 	if (ci->i_xattrs.blob)
735 		ceph_buffer_put(ci->i_xattrs.blob);
736 	if (ci->i_xattrs.prealloc_blob)
737 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
738 
739 	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
740 	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
741 }
742 
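/*
 * i_blocks is counted in 512-byte sectors; round the size up.
 */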
743 static inline blkcnt_t calc_inode_blocks(u64 size)
744 {
745 	return (size + (1<<9) - 1) >> 9;
746 }
747 
748 /*
749  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
750  * careful because either the client or MDS may have more up to date
751  * info, depending on which capabilities are held, and whether
752  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
753  * and size are monotonically increasing, except when utimes() or
754  * truncate() increments the corresponding _seq values.)
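 *
 * ceph_fill_file_size() returns nonzero if a pagecache truncation needs to
 * be queued; the caller is expected to call ceph_queue_vmtruncate() once it
 * has dropped its locks.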
755  */
756 int ceph_fill_file_size(struct inode *inode, int issued,
757 			u32 truncate_seq, u64 truncate_size, u64 size)
758 {
759 	struct ceph_client *cl = ceph_inode_to_client(inode);
760 	struct ceph_inode_info *ci = ceph_inode(inode);
761 	int queue_trunc = 0;
762 	loff_t isize = i_size_read(inode);
763 
764 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
765 	    (truncate_seq == ci->i_truncate_seq && size > isize)) {
766 		doutc(cl, "size %lld -> %llu\n", isize, size);
767 		if (size > 0 && S_ISDIR(inode->i_mode)) {
768 			pr_err_client(cl, "non-zero size for directory\n");
769 			size = 0;
770 		}
771 		i_size_write(inode, size);
772 		inode->i_blocks = calc_inode_blocks(size);
773 		/*
774 		 * If we're expanding, then we should be able to just update
775 		 * the existing cookie.
776 		 */
777 		if (size > isize)
778 			ceph_fscache_update(inode);
779 		ci->i_reported_size = size;
780 		if (truncate_seq != ci->i_truncate_seq) {
781 			doutc(cl, "truncate_seq %u -> %u\n",
782 			      ci->i_truncate_seq, truncate_seq);
783 			ci->i_truncate_seq = truncate_seq;
784 
785 			/* the MDS should have revoked these caps */
786 			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
787 					       CEPH_CAP_FILE_LAZYIO));
788 			/*
789 			 * If we hold the relevant caps (FILE_CACHE/FILE_BUFFER) we must
790 			 * queue a truncate.  Even if we don't (another client may hold
791 			 * them), we still need one if the file is locally opened or
792 			 * mmapped.
793 			 */
794 			if ((issued & (CEPH_CAP_FILE_CACHE|
795 				       CEPH_CAP_FILE_BUFFER)) ||
796 			    mapping_mapped(inode->i_mapping) ||
797 			    __ceph_is_file_opened(ci)) {
798 				ci->i_truncate_pending++;
799 				queue_trunc = 1;
800 			}
801 		}
802 	}
803 
804 	/*
805 	 * It's possible that the new sizes of two consecutive truncations
806 	 * will land in the same fscrypt last block, in which case we still
807 	 * need to truncate the corresponding page cache, so use >= rather
808 	 * than > when comparing truncate_seq.
809 	 */
810 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0) {
811 		doutc(cl, "truncate_size %lld -> %llu, encrypted %d\n",
812 		      ci->i_truncate_size, truncate_size,
813 		      !!IS_ENCRYPTED(inode));
814 
815 		ci->i_truncate_size = truncate_size;
816 
817 		if (IS_ENCRYPTED(inode)) {
818 			doutc(cl, "truncate_pagecache_size %lld -> %llu\n",
819 			      ci->i_truncate_pagecache_size, size);
820 			ci->i_truncate_pagecache_size = size;
821 		} else {
822 			ci->i_truncate_pagecache_size = truncate_size;
823 		}
824 	}
825 	return queue_trunc;
826 }
827 
828 void ceph_fill_file_time(struct inode *inode, int issued,
829 			 u64 time_warp_seq, struct timespec64 *ctime,
830 			 struct timespec64 *mtime, struct timespec64 *atime)
831 {
832 	struct ceph_client *cl = ceph_inode_to_client(inode);
833 	struct ceph_inode_info *ci = ceph_inode(inode);
834 	struct timespec64 ictime = inode_get_ctime(inode);
835 	int warn = 0;
836 
837 	if (issued & (CEPH_CAP_FILE_EXCL|
838 		      CEPH_CAP_FILE_WR|
839 		      CEPH_CAP_FILE_BUFFER|
840 		      CEPH_CAP_AUTH_EXCL|
841 		      CEPH_CAP_XATTR_EXCL)) {
842 		if (ci->i_version == 0 ||
843 		    timespec64_compare(ctime, &ictime) > 0) {
844 			doutc(cl, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
845 			     ictime.tv_sec, ictime.tv_nsec,
846 			     ctime->tv_sec, ctime->tv_nsec);
847 			inode_set_ctime_to_ts(inode, *ctime);
848 		}
849 		if (ci->i_version == 0 ||
850 		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
851 			/* the MDS did a utimes() */
852 			doutc(cl, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n",
853 			     inode_get_mtime_sec(inode),
854 			     inode_get_mtime_nsec(inode),
855 			     mtime->tv_sec, mtime->tv_nsec,
856 			     ci->i_time_warp_seq, (int)time_warp_seq);
857 
858 			inode_set_mtime_to_ts(inode, *mtime);
859 			inode_set_atime_to_ts(inode, *atime);
860 			ci->i_time_warp_seq = time_warp_seq;
861 		} else if (time_warp_seq == ci->i_time_warp_seq) {
862 			struct timespec64	ts;
863 
864 			/* nobody did utimes(); take the max */
865 			ts = inode_get_mtime(inode);
866 			if (timespec64_compare(mtime, &ts) > 0) {
867 				doutc(cl, "mtime %lld.%09ld -> %lld.%09ld inc\n",
868 				     ts.tv_sec, ts.tv_nsec,
869 				     mtime->tv_sec, mtime->tv_nsec);
870 				inode_set_mtime_to_ts(inode, *mtime);
871 			}
872 			ts = inode_get_atime(inode);
873 			if (timespec64_compare(atime, &ts) > 0) {
874 				doutc(cl, "atime %lld.%09ld -> %lld.%09ld inc\n",
875 				     ts.tv_sec, ts.tv_nsec,
876 				     atime->tv_sec, atime->tv_nsec);
877 				inode_set_atime_to_ts(inode, *atime);
878 			}
879 		} else if (issued & CEPH_CAP_FILE_EXCL) {
880 			/* we did a utimes(); ignore mds values */
881 		} else {
882 			warn = 1;
883 		}
884 	} else {
885 		/* we have no write|excl caps; whatever the MDS says is true */
886 		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
887 			inode_set_ctime_to_ts(inode, *ctime);
888 			inode_set_mtime_to_ts(inode, *mtime);
889 			inode_set_atime_to_ts(inode, *atime);
890 			ci->i_time_warp_seq = time_warp_seq;
891 		} else {
892 			warn = 1;
893 		}
894 	}
895 	if (warn) /* time_warp_seq shouldn't go backwards */
896 		doutc(cl, "%p mds time_warp_seq %llu < %u\n", inode,
897 		      time_warp_seq, ci->i_time_warp_seq);
898 }
899 
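/*
 * Decode a base64-encoded (encrypted) symlink target from the MDS reply
 * into a freshly allocated, NUL-terminated buffer.  Returns the decoded
 * length on success, a negative errno on failure, or -EOPNOTSUPP when
 * fscrypt support is not built in.
 */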
900 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
901 static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
902 				    const char *encsym,
903 				    int enclen, u8 **decsym)
904 {
905 	struct ceph_client *cl = mdsc->fsc->client;
906 	int declen;
907 	u8 *sym;
908 
909 	sym = kmalloc(enclen + 1, GFP_NOFS);
910 	if (!sym)
911 		return -ENOMEM;
912 
913 	declen = ceph_base64_decode(encsym, enclen, sym);
914 	if (declen < 0) {
915 		pr_err_client(cl,
916 			"can't decode symlink (%d). Content: %.*s\n",
917 			declen, enclen, encsym);
918 		kfree(sym);
919 		return -EIO;
920 	}
921 	sym[declen] = '\0';
922 	*decsym = sym;
923 	return declen;
924 }
925 #else
926 static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
927 				    const char *encsym,
928 				    int symlen, u8 **decsym)
929 {
930 	return -EOPNOTSUPP;
931 }
932 #endif
933 
934 /*
935  * Populate an inode based on info from mds.  May be called on new or
936  * existing inodes.
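 *
 * Caller must hold mdsc->snap_rwsem (see the lockdep assertion below).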
937  */
938 int ceph_fill_inode(struct inode *inode, struct page *locked_page,
939 		    struct ceph_mds_reply_info_in *iinfo,
940 		    struct ceph_mds_reply_dirfrag *dirinfo,
941 		    struct ceph_mds_session *session, int cap_fmode,
942 		    struct ceph_cap_reservation *caps_reservation)
943 {
944 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
945 	struct ceph_client *cl = mdsc->fsc->client;
946 	struct ceph_mds_reply_inode *info = iinfo->in;
947 	struct ceph_inode_info *ci = ceph_inode(inode);
948 	int issued, new_issued, info_caps;
949 	struct timespec64 mtime, atime, ctime;
950 	struct ceph_buffer *xattr_blob = NULL;
951 	struct ceph_buffer *old_blob = NULL;
952 	struct ceph_string *pool_ns = NULL;
953 	struct ceph_cap *new_cap = NULL;
954 	int err = 0;
955 	bool wake = false;
956 	bool queue_trunc = false;
957 	bool new_version = false;
958 	bool fill_inline = false;
959 	umode_t mode = le32_to_cpu(info->mode);
960 	dev_t rdev = le32_to_cpu(info->rdev);
961 
962 	lockdep_assert_held(&mdsc->snap_rwsem);
963 
964 	doutc(cl, "%p ino %llx.%llx v %llu had %llu\n", inode, ceph_vinop(inode),
965 	      le64_to_cpu(info->version), ci->i_version);
966 
967 	/* Once I_NEW is cleared, we can't change type or dev numbers */
968 	if (inode->i_state & I_NEW) {
969 		inode->i_mode = mode;
970 	} else {
971 		if (inode_wrong_type(inode, mode)) {
972 			pr_warn_once_client(cl,
973 				"inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
974 				ceph_vinop(inode), inode->i_mode, mode);
975 			return -ESTALE;
976 		}
977 
978 		if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
979 			pr_warn_once_client(cl,
980 				"dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
981 				ceph_vinop(inode), MAJOR(inode->i_rdev),
982 				MINOR(inode->i_rdev), MAJOR(rdev),
983 				MINOR(rdev));
984 			return -ESTALE;
985 		}
986 	}
987 
988 	info_caps = le32_to_cpu(info->cap.caps);
989 
990 	/* prealloc new cap struct */
991 	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
992 		new_cap = ceph_get_cap(mdsc, caps_reservation);
993 		if (!new_cap)
994 			return -ENOMEM;
995 	}
996 
997 	/*
998 	 * prealloc xattr data, if it looks like we'll need it.  only
999 	 * if len > 4 (meaning there are actually xattrs; the first 4
1000 	 * bytes are the xattr count).
1001 	 */
1002 	if (iinfo->xattr_len > 4) {
1003 		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
1004 		if (!xattr_blob)
1005 			pr_err_client(cl, "ENOMEM xattr blob %d bytes\n",
1006 				      iinfo->xattr_len);
1007 	}
1008 
1009 	if (iinfo->pool_ns_len > 0)
1010 		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
1011 						     iinfo->pool_ns_len);
1012 
1013 	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
1014 		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
1015 
1016 	spin_lock(&ci->i_ceph_lock);
1017 
1018 	/*
1019 	 * provided version will be odd if inode value is projected,
1020 	 * even if stable.  skip the update if we have newer stable
1021 	 * info (ours>=theirs, e.g. due to racing mds replies), unless
1022 	 * we are getting projected (unstable) info (in which case the
1023 	 * version is odd, and we want ours>theirs).
1024 	 *   us   them
1025 	 *   2    2     skip
1026 	 *   3    2     skip
1027 	 *   3    3     update
1028 	 */
1029 	if (ci->i_version == 0 ||
1030 	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
1031 	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
1032 		new_version = true;
1033 
1034 	/* Update change_attribute */
1035 	inode_set_max_iversion_raw(inode, iinfo->change_attr);
1036 
1037 	__ceph_caps_issued(ci, &issued);
1038 	issued |= __ceph_caps_dirty(ci);
1039 	new_issued = ~issued & info_caps;
1040 
1041 	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
1042 
1043 #ifdef CONFIG_FS_ENCRYPTION
1044 	if (iinfo->fscrypt_auth_len &&
1045 	    ((inode->i_state & I_NEW) || (ci->fscrypt_auth_len == 0))) {
1046 		kfree(ci->fscrypt_auth);
1047 		ci->fscrypt_auth_len = iinfo->fscrypt_auth_len;
1048 		ci->fscrypt_auth = iinfo->fscrypt_auth;
1049 		iinfo->fscrypt_auth = NULL;
1050 		iinfo->fscrypt_auth_len = 0;
1051 		inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
1052 	}
1053 #endif
1054 
1055 	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
1056 	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
1057 		inode->i_mode = mode;
1058 		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
1059 		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
1060 		doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode,
1061 		      ceph_vinop(inode), inode->i_mode,
1062 		      from_kuid(&init_user_ns, inode->i_uid),
1063 		      from_kgid(&init_user_ns, inode->i_gid));
1064 		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
1065 		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
1066 	}
1067 
1068 	/* directories have fl_stripe_unit set to zero */
1069 	if (IS_ENCRYPTED(inode))
1070 		inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
1071 	else if (le32_to_cpu(info->layout.fl_stripe_unit))
1072 		inode->i_blkbits =
1073 			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
1074 	else
1075 		inode->i_blkbits = CEPH_BLOCK_SHIFT;
1076 
1077 	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
1078 	    (issued & CEPH_CAP_LINK_EXCL) == 0)
1079 		set_nlink(inode, le32_to_cpu(info->nlink));
1080 
1081 	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
1082 		/* be careful with mtime, atime, size */
1083 		ceph_decode_timespec64(&atime, &info->atime);
1084 		ceph_decode_timespec64(&mtime, &info->mtime);
1085 		ceph_decode_timespec64(&ctime, &info->ctime);
1086 		ceph_fill_file_time(inode, issued,
1087 				le32_to_cpu(info->time_warp_seq),
1088 				&ctime, &mtime, &atime);
1089 	}
1090 
1091 	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
1092 		ci->i_files = le64_to_cpu(info->files);
1093 		ci->i_subdirs = le64_to_cpu(info->subdirs);
1094 	}
1095 
1096 	if (new_version ||
1097 	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
1098 		u64 size = le64_to_cpu(info->size);
1099 		s64 old_pool = ci->i_layout.pool_id;
1100 		struct ceph_string *old_ns;
1101 
1102 		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
1103 		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
1104 					lockdep_is_held(&ci->i_ceph_lock));
1105 		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
1106 
1107 		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
1108 			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
1109 
1110 		pool_ns = old_ns;
1111 
1112 		if (IS_ENCRYPTED(inode) && size &&
1113 		    iinfo->fscrypt_file_len == sizeof(__le64)) {
1114 			u64 fsize = __le64_to_cpu(*(__le64 *)iinfo->fscrypt_file);
1115 
1116 			if (size == round_up(fsize, CEPH_FSCRYPT_BLOCK_SIZE)) {
1117 				size = fsize;
1118 			} else {
1119 				pr_warn_client(cl,
1120 					"fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
1121 					size, fsize);
1122 			}
1123 		}
1124 
1125 		queue_trunc = ceph_fill_file_size(inode, issued,
1126 					le32_to_cpu(info->truncate_seq),
1127 					le64_to_cpu(info->truncate_size),
1128 					size);
1129 		/* only update max_size on auth cap */
1130 		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
1131 		    ci->i_max_size != le64_to_cpu(info->max_size)) {
1132 			doutc(cl, "max_size %lld -> %llu\n",
1133 			    ci->i_max_size, le64_to_cpu(info->max_size));
1134 			ci->i_max_size = le64_to_cpu(info->max_size);
1135 		}
1136 	}
1137 
1138 	/* layout and rstat are not tracked by capability, update them if
1139 	 * the inode info is from auth mds */
1140 	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
1141 		if (S_ISDIR(inode->i_mode)) {
1142 			ci->i_dir_layout = iinfo->dir_layout;
1143 			ci->i_rbytes = le64_to_cpu(info->rbytes);
1144 			ci->i_rfiles = le64_to_cpu(info->rfiles);
1145 			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
1146 			ci->i_dir_pin = iinfo->dir_pin;
1147 			ci->i_rsnaps = iinfo->rsnaps;
1148 			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
1149 		}
1150 	}
1151 
1152 	/* xattrs */
1153 	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
1154 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
1155 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
1156 		if (ci->i_xattrs.blob)
1157 			old_blob = ci->i_xattrs.blob;
1158 		ci->i_xattrs.blob = xattr_blob;
1159 		if (xattr_blob)
1160 			memcpy(ci->i_xattrs.blob->vec.iov_base,
1161 			       iinfo->xattr_data, iinfo->xattr_len);
1162 		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
1163 		ceph_forget_all_cached_acls(inode);
1164 		ceph_security_invalidate_secctx(inode);
1165 		xattr_blob = NULL;
1166 	}
1167 
1168 	/* finally update i_version */
1169 	if (le64_to_cpu(info->version) > ci->i_version)
1170 		ci->i_version = le64_to_cpu(info->version);
1171 
1172 	inode->i_mapping->a_ops = &ceph_aops;
1173 
1174 	switch (inode->i_mode & S_IFMT) {
1175 	case S_IFIFO:
1176 	case S_IFBLK:
1177 	case S_IFCHR:
1178 	case S_IFSOCK:
1179 		inode->i_blkbits = PAGE_SHIFT;
1180 		init_special_inode(inode, inode->i_mode, rdev);
1181 		inode->i_op = &ceph_file_iops;
1182 		break;
1183 	case S_IFREG:
1184 		inode->i_op = &ceph_file_iops;
1185 		inode->i_fop = &ceph_file_fops;
1186 		break;
1187 	case S_IFLNK:
1188 		if (!ci->i_symlink) {
1189 			u32 symlen = iinfo->symlink_len;
1190 			char *sym;
1191 
1192 			spin_unlock(&ci->i_ceph_lock);
1193 
1194 			if (IS_ENCRYPTED(inode)) {
1195 				if (symlen != i_size_read(inode))
1196 					pr_err_client(cl,
1197 						"%p %llx.%llx BAD symlink size %lld\n",
1198 						inode, ceph_vinop(inode),
1199 						i_size_read(inode));
1200 
1201 				err = decode_encrypted_symlink(mdsc, iinfo->symlink,
1202 							       symlen, (u8 **)&sym);
1203 				if (err < 0) {
1204 					pr_err_client(cl,
1205 						"decoding encrypted symlink failed: %d\n",
1206 						err);
1207 					goto out;
1208 				}
1209 				symlen = err;
1210 				i_size_write(inode, symlen);
1211 				inode->i_blocks = calc_inode_blocks(symlen);
1212 			} else {
1213 				if (symlen != i_size_read(inode)) {
1214 					pr_err_client(cl,
1215 						"%p %llx.%llx BAD symlink size %lld\n",
1216 						inode, ceph_vinop(inode),
1217 						i_size_read(inode));
1218 					i_size_write(inode, symlen);
1219 					inode->i_blocks = calc_inode_blocks(symlen);
1220 				}
1221 
1222 				err = -ENOMEM;
1223 				sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
1224 				if (!sym)
1225 					goto out;
1226 			}
1227 
1228 			spin_lock(&ci->i_ceph_lock);
1229 			if (!ci->i_symlink)
1230 				ci->i_symlink = sym;
1231 			else
1232 				kfree(sym); /* lost a race */
1233 		}
1234 
1235 		if (IS_ENCRYPTED(inode)) {
1236 			/*
1237 			 * Encrypted symlinks need to be decrypted before we can
1238 			 * cache their targets in i_link. Don't touch it here.
1239 			 */
1240 			inode->i_op = &ceph_encrypted_symlink_iops;
1241 		} else {
1242 			inode->i_link = ci->i_symlink;
1243 			inode->i_op = &ceph_symlink_iops;
1244 		}
1245 		break;
1246 	case S_IFDIR:
1247 		inode->i_op = &ceph_dir_iops;
1248 		inode->i_fop = &ceph_dir_fops;
1249 		break;
1250 	default:
1251 		pr_err_client(cl, "%p %llx.%llx BAD mode 0%o\n", inode,
1252 			      ceph_vinop(inode), inode->i_mode);
1253 	}
1254 
1255 	/* were we issued a capability? */
1256 	if (info_caps) {
1257 		if (ceph_snap(inode) == CEPH_NOSNAP) {
1258 			ceph_add_cap(inode, session,
1259 				     le64_to_cpu(info->cap.cap_id),
1260 				     info_caps,
1261 				     le32_to_cpu(info->cap.wanted),
1262 				     le32_to_cpu(info->cap.seq),
1263 				     le32_to_cpu(info->cap.mseq),
1264 				     le64_to_cpu(info->cap.realm),
1265 				     info->cap.flags, &new_cap);
1266 
1267 			/* set dir completion flag? */
1268 			if (S_ISDIR(inode->i_mode) &&
1269 			    ci->i_files == 0 && ci->i_subdirs == 0 &&
1270 			    (info_caps & CEPH_CAP_FILE_SHARED) &&
1271 			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
1272 			    !__ceph_dir_is_complete(ci)) {
1273 				doutc(cl, " marking %p complete (empty)\n",
1274 				      inode);
1275 				i_size_write(inode, 0);
1276 				__ceph_dir_set_complete(ci,
1277 					atomic64_read(&ci->i_release_count),
1278 					atomic64_read(&ci->i_ordered_count));
1279 			}
1280 
1281 			wake = true;
1282 		} else {
1283 			doutc(cl, " %p got snap_caps %s\n", inode,
1284 			      ceph_cap_string(info_caps));
1285 			ci->i_snap_caps |= info_caps;
1286 		}
1287 	}
1288 
1289 	if (iinfo->inline_version > 0 &&
1290 	    iinfo->inline_version >= ci->i_inline_version) {
1291 		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1292 		ci->i_inline_version = iinfo->inline_version;
1293 		if (ceph_has_inline_data(ci) &&
1294 		    (locked_page || (info_caps & cache_caps)))
1295 			fill_inline = true;
1296 	}
1297 
1298 	if (cap_fmode >= 0) {
1299 		if (!info_caps)
1300 			pr_warn_client(cl, "mds issued no caps on %llx.%llx\n",
1301 				       ceph_vinop(inode));
1302 		__ceph_touch_fmode(ci, mdsc, cap_fmode);
1303 	}
1304 
1305 	spin_unlock(&ci->i_ceph_lock);
1306 
1307 	ceph_fscache_register_inode_cookie(inode);
1308 
1309 	if (fill_inline)
1310 		ceph_fill_inline_data(inode, locked_page,
1311 				      iinfo->inline_data, iinfo->inline_len);
1312 
1313 	if (wake)
1314 		wake_up_all(&ci->i_cap_wq);
1315 
1316 	/* queue truncate if we saw i_size decrease */
1317 	if (queue_trunc)
1318 		ceph_queue_vmtruncate(inode);
1319 
1320 	/* populate frag tree */
1321 	if (S_ISDIR(inode->i_mode))
1322 		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
1323 
1324 	/* update delegation info? */
1325 	if (dirinfo)
1326 		ceph_fill_dirfrag(inode, dirinfo);
1327 
1328 	err = 0;
1329 out:
1330 	if (new_cap)
1331 		ceph_put_cap(mdsc, new_cap);
1332 	ceph_buffer_put(old_blob);
1333 	ceph_buffer_put(xattr_blob);
1334 	ceph_put_string(pool_ns);
1335 	return err;
1336 }
1337 
1338 /*
1339  * caller should hold session s_mutex and dentry->d_lock.
1340  */
1341 static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
1342 				  struct ceph_mds_reply_lease *lease,
1343 				  struct ceph_mds_session *session,
1344 				  unsigned long from_time,
1345 				  struct ceph_mds_session **old_lease_session)
1346 {
1347 	struct ceph_client *cl = ceph_inode_to_client(dir);
1348 	struct ceph_dentry_info *di = ceph_dentry(dentry);
1349 	unsigned mask = le16_to_cpu(lease->mask);
1350 	long unsigned duration = le32_to_cpu(lease->duration_ms);
1351 	long unsigned ttl = from_time + (duration * HZ) / 1000;
1352 	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
1353 
1354 	doutc(cl, "%p duration %lu ms ttl %lu\n", dentry, duration, ttl);
1355 
1356 	/* only track leases on regular dentries */
1357 	if (ceph_snap(dir) != CEPH_NOSNAP)
1358 		return;
1359 
1360 	if (mask & CEPH_LEASE_PRIMARY_LINK)
1361 		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
1362 	else
1363 		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1364 
1365 	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
1366 	if (!(mask & CEPH_LEASE_VALID)) {
1367 		__ceph_dentry_dir_lease_touch(di);
1368 		return;
1369 	}
1370 
1371 	if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
1372 	    time_before(ttl, di->time))
1373 		return;  /* we already have a newer lease. */
1374 
1375 	if (di->lease_session && di->lease_session != session) {
1376 		*old_lease_session = di->lease_session;
1377 		di->lease_session = NULL;
1378 	}
1379 
1380 	if (!di->lease_session)
1381 		di->lease_session = ceph_get_mds_session(session);
1382 	di->lease_gen = atomic_read(&session->s_cap_gen);
1383 	di->lease_seq = le32_to_cpu(lease->seq);
1384 	di->lease_renew_after = half_ttl;
1385 	di->lease_renew_from = 0;
1386 	di->time = ttl;
1387 
1388 	__ceph_dentry_lease_touch(di);
1389 }
1390 
1391 static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
1392 					struct ceph_mds_reply_lease *lease,
1393 					struct ceph_mds_session *session,
1394 					unsigned long from_time)
1395 {
1396 	struct ceph_mds_session *old_lease_session = NULL;
1397 	spin_lock(&dentry->d_lock);
1398 	__update_dentry_lease(dir, dentry, lease, session, from_time,
1399 			      &old_lease_session);
1400 	spin_unlock(&dentry->d_lock);
1401 	ceph_put_mds_session(old_lease_session);
1402 }
1403 
1404 /*
1405  * update dentry lease without having parent inode locked
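 * Since the parent isn't locked, re-verify under d_lock that the dentry's
 * name, parent and target inode still match what the MDS reply described
 * before applying the lease.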
1406  */
1407 static void update_dentry_lease_careful(struct dentry *dentry,
1408 					struct ceph_mds_reply_lease *lease,
1409 					struct ceph_mds_session *session,
1410 					unsigned long from_time,
1411 					char *dname, u32 dname_len,
1412 					struct ceph_vino *pdvino,
1413 					struct ceph_vino *ptvino)
1414 
1415 {
1416 	struct inode *dir;
1417 	struct ceph_mds_session *old_lease_session = NULL;
1418 
1419 	spin_lock(&dentry->d_lock);
1420 	/* make sure dentry's name matches target */
1421 	if (dentry->d_name.len != dname_len ||
1422 	    memcmp(dentry->d_name.name, dname, dname_len))
1423 		goto out_unlock;
1424 
1425 	dir = d_inode(dentry->d_parent);
1426 	/* make sure parent matches dvino */
1427 	if (!ceph_ino_compare(dir, pdvino))
1428 		goto out_unlock;
1429 
1430 	/* make sure dentry's inode matches target. NULL ptvino means that
1431 	 * we expect a negative dentry */
1432 	if (ptvino) {
1433 		if (d_really_is_negative(dentry))
1434 			goto out_unlock;
1435 		if (!ceph_ino_compare(d_inode(dentry), ptvino))
1436 			goto out_unlock;
1437 	} else {
1438 		if (d_really_is_positive(dentry))
1439 			goto out_unlock;
1440 	}
1441 
1442 	__update_dentry_lease(dir, dentry, lease, session,
1443 			      from_time, &old_lease_session);
1444 out_unlock:
1445 	spin_unlock(&dentry->d_lock);
1446 	ceph_put_mds_session(old_lease_session);
1447 }
1448 
1449 /*
1450  * splice a dentry to an inode.
1451  * caller must hold directory i_rwsem for this to be safe.
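 *
 * On success, *pdn may be updated to point at an existing alias returned by
 * d_splice_alias(), in which case the reference on the original dentry is
 * dropped.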
1452  */
1453 static int splice_dentry(struct dentry **pdn, struct inode *in)
1454 {
1455 	struct ceph_client *cl = ceph_inode_to_client(in);
1456 	struct dentry *dn = *pdn;
1457 	struct dentry *realdn;
1458 
1459 	BUG_ON(d_inode(dn));
1460 
1461 	if (S_ISDIR(in->i_mode)) {
1462 		/* If inode is directory, d_splice_alias() below will remove
1463 		 * 'realdn' from its origin parent. We need to ensure that
1464 		 * origin parent's readdir cache will not reference 'realdn'
1465 		 */
1466 		realdn = d_find_any_alias(in);
1467 		if (realdn) {
1468 			struct ceph_dentry_info *di = ceph_dentry(realdn);
1469 			spin_lock(&realdn->d_lock);
1470 
1471 			realdn->d_op->d_prune(realdn);
1472 
1473 			di->time = jiffies;
1474 			di->lease_shared_gen = 0;
1475 			di->offset = 0;
1476 
1477 			spin_unlock(&realdn->d_lock);
1478 			dput(realdn);
1479 		}
1480 	}
1481 
1482 	/* dn must be unhashed */
1483 	if (!d_unhashed(dn))
1484 		d_drop(dn);
1485 	realdn = d_splice_alias(in, dn);
1486 	if (IS_ERR(realdn)) {
1487 		pr_err_client(cl, "error %ld %p inode %p ino %llx.%llx\n",
1488 			      PTR_ERR(realdn), dn, in, ceph_vinop(in));
1489 		return PTR_ERR(realdn);
1490 	}
1491 
1492 	if (realdn) {
1493 		doutc(cl, "dn %p (%d) spliced with %p (%d) inode %p ino %llx.%llx\n",
1494 		      dn, d_count(dn), realdn, d_count(realdn),
1495 		      d_inode(realdn), ceph_vinop(d_inode(realdn)));
1496 		dput(dn);
1497 		*pdn = realdn;
1498 	} else {
1499 		BUG_ON(!ceph_dentry(dn));
1500 		doutc(cl, "dn %p attached to %p ino %llx.%llx\n", dn,
1501 		      d_inode(dn), ceph_vinop(d_inode(dn)));
1502 	}
1503 	return 0;
1504 }
1505 
1506 /*
1507  * Incorporate results into the local cache.  This is either just
1508  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1509  * after a lookup).
1510  *
1511  * A reply may contain
1512  *         a directory inode along with a dentry.
1513  *  and/or a target inode
1514  *
1515  * Called with snap_rwsem (read).
1516  */
1517 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
1518 {
1519 	struct ceph_mds_session *session = req->r_session;
1520 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1521 	struct inode *in = NULL;
1522 	struct ceph_vino tvino, dvino;
1523 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
1524 	struct ceph_client *cl = fsc->client;
1525 	int err = 0;
1526 
1527 	doutc(cl, "%p is_dentry %d is_target %d\n", req,
1528 	      rinfo->head->is_dentry, rinfo->head->is_target);
1529 
1530 	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1531 		doutc(cl, "reply is empty!\n");
1532 		if (rinfo->head->result == 0 && req->r_parent)
1533 			ceph_invalidate_dir_request(req);
1534 		return 0;
1535 	}
1536 
1537 	if (rinfo->head->is_dentry) {
1538 		struct inode *dir = req->r_parent;
1539 
1540 		if (dir) {
1541 			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
1542 					      rinfo->dirfrag, session, -1,
1543 					      &req->r_caps_reservation);
1544 			if (err < 0)
1545 				goto done;
1546 		} else {
1547 			WARN_ON_ONCE(1);
1548 		}
1549 
1550 		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
1551 		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1552 		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1553 			bool is_nokey = false;
1554 			struct qstr dname;
1555 			struct dentry *dn, *parent;
1556 			struct fscrypt_str oname = FSTR_INIT(NULL, 0);
1557 			struct ceph_fname fname = { .dir	= dir,
1558 						    .name	= rinfo->dname,
1559 						    .ctext	= rinfo->altname,
1560 						    .name_len	= rinfo->dname_len,
1561 						    .ctext_len	= rinfo->altname_len };
1562 
1563 			BUG_ON(!rinfo->head->is_target);
1564 			BUG_ON(req->r_dentry);
1565 
1566 			parent = d_find_any_alias(dir);
1567 			BUG_ON(!parent);
1568 
1569 			err = ceph_fname_alloc_buffer(dir, &oname);
1570 			if (err < 0) {
1571 				dput(parent);
1572 				goto done;
1573 			}
1574 
1575 			err = ceph_fname_to_usr(&fname, NULL, &oname, &is_nokey);
1576 			if (err < 0) {
1577 				dput(parent);
1578 				ceph_fname_free_buffer(dir, &oname);
1579 				goto done;
1580 			}
1581 			dname.name = oname.name;
1582 			dname.len = oname.len;
1583 			dname.hash = full_name_hash(parent, dname.name, dname.len);
1584 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1585 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1586 retry_lookup:
1587 			dn = d_lookup(parent, &dname);
1588 			doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
1589 			      parent, dname.len, dname.name, dn);
1590 
1591 			if (!dn) {
1592 				dn = d_alloc(parent, &dname);
1593 				doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
1594 				      dname.len, dname.name, dn);
1595 				if (!dn) {
1596 					dput(parent);
1597 					ceph_fname_free_buffer(dir, &oname);
1598 					err = -ENOMEM;
1599 					goto done;
1600 				}
1601 				if (is_nokey) {
1602 					spin_lock(&dn->d_lock);
1603 					dn->d_flags |= DCACHE_NOKEY_NAME;
1604 					spin_unlock(&dn->d_lock);
1605 				}
1606 				err = 0;
1607 			} else if (d_really_is_positive(dn) &&
1608 				   (ceph_ino(d_inode(dn)) != tvino.ino ||
1609 				    ceph_snap(d_inode(dn)) != tvino.snap)) {
1610 				doutc(cl, " dn %p points to wrong inode %p\n",
1611 				      dn, d_inode(dn));
1612 				ceph_dir_clear_ordered(dir);
1613 				d_delete(dn);
1614 				dput(dn);
1615 				goto retry_lookup;
1616 			}
1617 			ceph_fname_free_buffer(dir, &oname);
1618 
1619 			req->r_dentry = dn;
1620 			dput(parent);
1621 		}
1622 	}
1623 
1624 	if (rinfo->head->is_target) {
1625 		/* Should be filled in by handle_reply */
1626 		BUG_ON(!req->r_target_inode);
1627 
1628 		in = req->r_target_inode;
1629 		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
1630 				NULL, session,
1631 				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1632 				 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
1633 				 rinfo->head->result == 0) ?  req->r_fmode : -1,
1634 				&req->r_caps_reservation);
1635 		if (err < 0) {
1636 			pr_err_client(cl, "badness %p %llx.%llx\n", in,
1637 				      ceph_vinop(in));
1638 			req->r_target_inode = NULL;
1639 			if (in->i_state & I_NEW)
1640 				discard_new_inode(in);
1641 			else
1642 				iput(in);
1643 			goto done;
1644 		}
1645 		if (in->i_state & I_NEW)
1646 			unlock_new_inode(in);
1647 	}
1648 
1649 	/*
1650 	 * ignore null lease/binding on snapdir ENOENT, or else we
1651 	 * will have trouble splicing in the virtual snapdir later
1652 	 */
1653 	if (rinfo->head->is_dentry &&
1654             !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1655 	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1656 	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1657 					       fsc->mount_options->snapdir_name,
1658 					       req->r_dentry->d_name.len))) {
1659 		/*
1660 		 * lookup link rename   : null -> possibly existing inode
1661 		 * mknod symlink mkdir  : null -> new inode
1662 		 * unlink               : linked -> null
1663 		 */
1664 		struct inode *dir = req->r_parent;
1665 		struct dentry *dn = req->r_dentry;
1666 		bool have_dir_cap, have_lease;
1667 
1668 		BUG_ON(!dn);
1669 		BUG_ON(!dir);
1670 		BUG_ON(d_inode(dn->d_parent) != dir);
1671 
1672 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1673 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1674 
1675 		BUG_ON(ceph_ino(dir) != dvino.ino);
1676 		BUG_ON(ceph_snap(dir) != dvino.snap);
1677 
1678 		/* do we have a lease on the whole dir? */
1679 		have_dir_cap =
1680 			(le32_to_cpu(rinfo->diri.in->cap.caps) &
1681 			 CEPH_CAP_FILE_SHARED);
1682 
1683 		/* do we have a dn lease? */
1684 		have_lease = have_dir_cap ||
1685 			le32_to_cpu(rinfo->dlease->duration_ms);
1686 		if (!have_lease)
1687 			doutc(cl, "no dentry lease or dir cap\n");
1688 
1689 		/* rename? */
1690 		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1691 			struct inode *olddir = req->r_old_dentry_dir;
1692 			BUG_ON(!olddir);
1693 
1694 			doutc(cl, " src %p '%pd' dst %p '%pd'\n",
1695 			      req->r_old_dentry, req->r_old_dentry, dn, dn);
1696 			doutc(cl, "doing d_move %p -> %p\n", req->r_old_dentry, dn);
1697 
1698 			/* d_move screws up sibling dentries' offsets */
1699 			ceph_dir_clear_ordered(dir);
1700 			ceph_dir_clear_ordered(olddir);
1701 
1702 			d_move(req->r_old_dentry, dn);
1703 			doutc(cl, " src %p '%pd' dst %p '%pd'\n",
1704 			      req->r_old_dentry, req->r_old_dentry, dn, dn);
1705 
1706 			/* ensure target dentry is invalidated, despite
1707 			   rehashing bug in vfs_rename_dir */
1708 			ceph_invalidate_dentry_lease(dn);
1709 
1710 			doutc(cl, "dn %p gets new offset %lld\n",
1711 			      req->r_old_dentry,
1712 			      ceph_dentry(req->r_old_dentry)->offset);
1713 
1714 			/* swap r_dentry and r_old_dentry in case that
1715 			 * splice_dentry() gets called later. This is safe
1716 			 * because no other place will use them */
1717 			req->r_dentry = req->r_old_dentry;
1718 			req->r_old_dentry = dn;
1719 			dn = req->r_dentry;
1720 		}
1721 
1722 		/* null dentry? */
1723 		if (!rinfo->head->is_target) {
1724 			doutc(cl, "null dentry\n");
1725 			if (d_really_is_positive(dn)) {
1726 				doutc(cl, "d_delete %p\n", dn);
1727 				ceph_dir_clear_ordered(dir);
1728 				d_delete(dn);
1729 			} else if (have_lease) {
1730 				if (d_unhashed(dn))
1731 					d_add(dn, NULL);
1732 			}
1733 
1734 			if (!d_unhashed(dn) && have_lease)
1735 				update_dentry_lease(dir, dn,
1736 						    rinfo->dlease, session,
1737 						    req->r_request_started);
1738 			goto done;
1739 		}
1740 
1741 		/* attach proper inode */
1742 		if (d_really_is_negative(dn)) {
1743 			ceph_dir_clear_ordered(dir);
1744 			ihold(in);
1745 			err = splice_dentry(&req->r_dentry, in);
1746 			if (err < 0)
1747 				goto done;
1748 			dn = req->r_dentry;  /* may have spliced */
1749 		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1750 			doutc(cl, " %p links to %p %llx.%llx, not %llx.%llx\n",
1751 			      dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1752 			      ceph_vinop(in));
1753 			d_invalidate(dn);
1754 			have_lease = false;
1755 		}
1756 
1757 		if (have_lease) {
1758 			update_dentry_lease(dir, dn,
1759 					    rinfo->dlease, session,
1760 					    req->r_request_started);
1761 		}
1762 		doutc(cl, " final dn %p\n", dn);
1763 	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1764 		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
1765 	           test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1766 		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1767 		struct inode *dir = req->r_parent;
1768 
1769 		/* fill out a snapdir LOOKUPSNAP dentry */
1770 		BUG_ON(!dir);
1771 		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1772 		BUG_ON(!req->r_dentry);
1773 		doutc(cl, " linking snapped dir %p to dn %p\n", in,
1774 		      req->r_dentry);
1775 		ceph_dir_clear_ordered(dir);
1776 		ihold(in);
1777 		err = splice_dentry(&req->r_dentry, in);
1778 		if (err < 0)
1779 			goto done;
1780 	} else if (rinfo->head->is_dentry && req->r_dentry) {
1781 		/* parent inode is not locked, be careful */
1782 		struct ceph_vino *ptvino = NULL;
1783 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1784 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1785 		if (rinfo->head->is_target) {
1786 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1787 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1788 			ptvino = &tvino;
1789 		}
1790 		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
1791 					    session, req->r_request_started,
1792 					    rinfo->dname, rinfo->dname_len,
1793 					    &dvino, ptvino);
1794 	}
1795 done:
1796 	doutc(cl, "done err=%d\n", err);
1797 	return err;
1798 }
1799 
1800 /*
1801  * Prepopulate our cache with readdir results, leases, etc.
1802  */
1803 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1804 					   struct ceph_mds_session *session)
1805 {
1806 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1807 	struct ceph_client *cl = session->s_mdsc->fsc->client;
1808 	int i, err = 0;
1809 
1810 	for (i = 0; i < rinfo->dir_nr; i++) {
1811 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1812 		struct ceph_vino vino;
1813 		struct inode *in;
1814 		int rc;
1815 
1816 		vino.ino = le64_to_cpu(rde->inode.in->ino);
1817 		vino.snap = le64_to_cpu(rde->inode.in->snapid);
1818 
1819 		in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL);
1820 		if (IS_ERR(in)) {
1821 			err = PTR_ERR(in);
1822 			doutc(cl, "badness got %d\n", err);
1823 			continue;
1824 		}
1825 		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1826 				     -1, &req->r_caps_reservation);
1827 		if (rc < 0) {
1828 			pr_err_client(cl, "inode badness on %p got %d\n", in,
1829 				      rc);
1830 			err = rc;
1831 			if (in->i_state & I_NEW) {
1832 				ihold(in);
1833 				discard_new_inode(in);
1834 			}
1835 		} else if (in->i_state & I_NEW) {
1836 			unlock_new_inode(in);
1837 		}
1838 
1839 		iput(in);
1840 	}
1841 
1842 	return err;
1843 }
1844 
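/*
 * Unmap and release the page currently held by the readdir cache
 * control, if any.
 */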
1845 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1846 {
1847 	if (ctl->page) {
1848 		kunmap(ctl->page);
1849 		put_page(ctl->page);
1850 		ctl->page = NULL;
1851 	}
1852 }
1853 
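/*
 * Record the dentry for readdir entry ctl->index in the directory's
 * page cache so that later readdirs can be satisfied from the dcache.
 * If the directory contents changed while this readdir was in flight
 * (the release/ordered counts no longer match), disable the cache by
 * setting ctl->index to -1.
 */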
1854 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1855 			      struct ceph_readdir_cache_control *ctl,
1856 			      struct ceph_mds_request *req)
1857 {
1858 	struct ceph_client *cl = ceph_inode_to_client(dir);
1859 	struct ceph_inode_info *ci = ceph_inode(dir);
1860 	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1861 	unsigned idx = ctl->index % nsize;
1862 	pgoff_t pgoff = ctl->index / nsize;
1863 
1864 	if (!ctl->page || pgoff != page_index(ctl->page)) {
1865 		ceph_readdir_cache_release(ctl);
1866 		if (idx == 0)
1867 			ctl->page = grab_cache_page(&dir->i_data, pgoff);
1868 		else
1869 			ctl->page = find_lock_page(&dir->i_data, pgoff);
1870 		if (!ctl->page) {
1871 			ctl->index = -1;
1872 			return idx == 0 ? -ENOMEM : 0;
1873 		}
1874 		/* reading/filling the cache is serialized by
1875 		 * i_rwsem, no need to hold the page lock */
1876 		unlock_page(ctl->page);
1877 		ctl->dentries = kmap(ctl->page);
1878 		if (idx == 0)
1879 			memset(ctl->dentries, 0, PAGE_SIZE);
1880 	}
1881 
1882 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1883 	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1884 		doutc(cl, "dn %p idx %d\n", dn, ctl->index);
1885 		ctl->dentries[idx] = dn;
1886 		ctl->index++;
1887 	} else {
1888 		doutc(cl, "disable readdir cache\n");
1889 		ctl->index = -1;
1890 	}
1891 	return 0;
1892 }
1893 
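/*
 * Populate the dcache and icache with the entries from a readdir
 * reply, updating dentry leases and, when nothing was skipped, the
 * shared readdir cache. If the request was aborted, fall back to
 * instantiating the inodes only.
 */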
1894 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1895 			     struct ceph_mds_session *session)
1896 {
1897 	struct dentry *parent = req->r_dentry;
1898 	struct inode *inode = d_inode(parent);
1899 	struct ceph_inode_info *ci = ceph_inode(inode);
1900 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1901 	struct ceph_client *cl = session->s_mdsc->fsc->client;
1902 	struct qstr dname;
1903 	struct dentry *dn;
1904 	struct inode *in;
1905 	int err = 0, skipped = 0, ret, i;
1906 	u32 frag = le32_to_cpu(req->r_args.readdir.frag);
1907 	u32 last_hash = 0;
1908 	u32 fpos_offset;
1909 	struct ceph_readdir_cache_control cache_ctl = {};
1910 
1911 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1912 		return readdir_prepopulate_inodes_only(req, session);
1913 
1914 	if (rinfo->hash_order) {
1915 		if (req->r_path2) {
1916 			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1917 						  req->r_path2,
1918 						  strlen(req->r_path2));
1919 			last_hash = ceph_frag_value(last_hash);
1920 		} else if (rinfo->offset_hash) {
1921 			/* mds understands offset_hash */
1922 			WARN_ON_ONCE(req->r_readdir_offset != 2);
1923 			last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
1924 		}
1925 	}
1926 
1927 	if (rinfo->dir_dir &&
1928 	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1929 		doutc(cl, "got new frag %x -> %x\n", frag,
1930 			    le32_to_cpu(rinfo->dir_dir->frag));
1931 		frag = le32_to_cpu(rinfo->dir_dir->frag);
1932 		if (!rinfo->hash_order)
1933 			req->r_readdir_offset = 2;
1934 	}
1935 
1936 	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1937 		doutc(cl, "%d items under SNAPDIR dn %p\n",
1938 		      rinfo->dir_nr, parent);
1939 	} else {
1940 		doutc(cl, "%d items under dn %p\n", rinfo->dir_nr, parent);
1941 		if (rinfo->dir_dir)
1942 			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1943 
1944 		if (ceph_frag_is_leftmost(frag) &&
1945 		    req->r_readdir_offset == 2 &&
1946 		    !(rinfo->hash_order && last_hash)) {
1947 			/* note dir version at start of readdir so we can
1948 			 * tell if any dentries get dropped */
1949 			req->r_dir_release_cnt =
1950 				atomic64_read(&ci->i_release_count);
1951 			req->r_dir_ordered_cnt =
1952 				atomic64_read(&ci->i_ordered_count);
1953 			req->r_readdir_cache_idx = 0;
1954 		}
1955 	}
1956 
1957 	cache_ctl.index = req->r_readdir_cache_idx;
1958 	fpos_offset = req->r_readdir_offset;
1959 
1960 	/* FIXME: release caps/leases if error occurs */
1961 	for (i = 0; i < rinfo->dir_nr; i++) {
1962 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1963 		struct ceph_vino tvino;
1964 
1965 		dname.name = rde->name;
1966 		dname.len = rde->name_len;
1967 		dname.hash = full_name_hash(parent, dname.name, dname.len);
1968 
1969 		tvino.ino = le64_to_cpu(rde->inode.in->ino);
1970 		tvino.snap = le64_to_cpu(rde->inode.in->snapid);
1971 
1972 		if (rinfo->hash_order) {
1973 			u32 hash = ceph_frag_value(rde->raw_hash);
1974 			if (hash != last_hash)
1975 				fpos_offset = 2;
1976 			last_hash = hash;
1977 			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1978 		} else {
1979 			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1980 		}
1981 
1982 retry_lookup:
1983 		dn = d_lookup(parent, &dname);
1984 		doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
1985 		      parent, dname.len, dname.name, dn);
1986 
1987 		if (!dn) {
1988 			dn = d_alloc(parent, &dname);
1989 			doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
1990 			      dname.len, dname.name, dn);
1991 			if (!dn) {
1992 				doutc(cl, "d_alloc badness\n");
1993 				err = -ENOMEM;
1994 				goto out;
1995 			}
1996 			if (rde->is_nokey) {
1997 				spin_lock(&dn->d_lock);
1998 				dn->d_flags |= DCACHE_NOKEY_NAME;
1999 				spin_unlock(&dn->d_lock);
2000 			}
2001 		} else if (d_really_is_positive(dn) &&
2002 			   (ceph_ino(d_inode(dn)) != tvino.ino ||
2003 			    ceph_snap(d_inode(dn)) != tvino.snap)) {
2004 			struct ceph_dentry_info *di = ceph_dentry(dn);
2005 			doutc(cl, " dn %p points to wrong inode %p\n",
2006 			      dn, d_inode(dn));
2007 
2008 			spin_lock(&dn->d_lock);
2009 			if (di->offset > 0 &&
2010 			    di->lease_shared_gen ==
2011 			    atomic_read(&ci->i_shared_gen)) {
2012 				__ceph_dir_clear_ordered(ci);
2013 				di->offset = 0;
2014 			}
2015 			spin_unlock(&dn->d_lock);
2016 
2017 			d_delete(dn);
2018 			dput(dn);
2019 			goto retry_lookup;
2020 		}
2021 
2022 		/* inode */
2023 		if (d_really_is_positive(dn)) {
2024 			in = d_inode(dn);
2025 		} else {
2026 			in = ceph_get_inode(parent->d_sb, tvino, NULL);
2027 			if (IS_ERR(in)) {
2028 				doutc(cl, "new_inode badness\n");
2029 				d_drop(dn);
2030 				dput(dn);
2031 				err = PTR_ERR(in);
2032 				goto out;
2033 			}
2034 		}
2035 
2036 		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
2037 				      -1, &req->r_caps_reservation);
2038 		if (ret < 0) {
2039 			pr_err_client(cl, "badness on %p %llx.%llx\n", in,
2040 				      ceph_vinop(in));
2041 			if (d_really_is_negative(dn)) {
2042 				if (in->i_state & I_NEW) {
2043 					ihold(in);
2044 					discard_new_inode(in);
2045 				}
2046 				iput(in);
2047 			}
2048 			d_drop(dn);
2049 			err = ret;
2050 			goto next_item;
2051 		}
2052 		if (in->i_state & I_NEW)
2053 			unlock_new_inode(in);
2054 
2055 		if (d_really_is_negative(dn)) {
2056 			if (ceph_security_xattr_deadlock(in)) {
2057 				doutc(cl, " skip splicing dn %p to inode %p"
2058 				      " (security xattr deadlock)\n", dn, in);
2059 				iput(in);
2060 				skipped++;
2061 				goto next_item;
2062 			}
2063 
2064 			err = splice_dentry(&dn, in);
2065 			if (err < 0)
2066 				goto next_item;
2067 		}
2068 
2069 		ceph_dentry(dn)->offset = rde->offset;
2070 
2071 		update_dentry_lease(d_inode(parent), dn,
2072 				    rde->lease, req->r_session,
2073 				    req->r_request_started);
2074 
2075 		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
2076 			ret = fill_readdir_cache(d_inode(parent), dn,
2077 						 &cache_ctl, req);
2078 			if (ret < 0)
2079 				err = ret;
2080 		}
2081 next_item:
2082 		dput(dn);
2083 	}
2084 out:
2085 	if (err == 0 && skipped == 0) {
2086 		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
2087 		req->r_readdir_cache_idx = cache_ctl.index;
2088 	}
2089 	ceph_readdir_cache_release(&cache_ctl);
2090 	doutc(cl, "done\n");
2091 	return err;
2092 }
2093 
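/*
 * Update the locally cached i_size/i_blocks. Returns true if the
 * caller should report the new size to the MDS.
 */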
2094 bool ceph_inode_set_size(struct inode *inode, loff_t size)
2095 {
2096 	struct ceph_client *cl = ceph_inode_to_client(inode);
2097 	struct ceph_inode_info *ci = ceph_inode(inode);
2098 	bool ret;
2099 
2100 	spin_lock(&ci->i_ceph_lock);
2101 	doutc(cl, "set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
2102 	i_size_write(inode, size);
2103 	ceph_fscache_update(inode);
2104 	inode->i_blocks = calc_inode_blocks(size);
2105 
2106 	ret = __ceph_should_report_size(ci);
2107 
2108 	spin_unlock(&ci->i_ceph_lock);
2109 
2110 	return ret;
2111 }
2112 
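/*
 * Queue deferred work for this inode on the fs client's inode_wq.
 * An inode reference is taken here and dropped by ceph_inode_work(),
 * or dropped immediately if the work was already queued.
 */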
2113 void ceph_queue_inode_work(struct inode *inode, int work_bit)
2114 {
2115 	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2116 	struct ceph_client *cl = fsc->client;
2117 	struct ceph_inode_info *ci = ceph_inode(inode);
2118 	set_bit(work_bit, &ci->i_work_mask);
2119 
2120 	ihold(inode);
2121 	if (queue_work(fsc->inode_wq, &ci->i_work)) {
2122 		doutc(cl, "%p %llx.%llx mask=%lx\n", inode,
2123 		      ceph_vinop(inode), ci->i_work_mask);
2124 	} else {
2125 		doutc(cl, "%p %llx.%llx already queued, mask=%lx\n",
2126 		      inode, ceph_vinop(inode), ci->i_work_mask);
2127 		iput(inode);
2128 	}
2129 }
2130 
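/*
 * Invalidate the pagecache in response to a CEPH_CAP_FILE_CACHE
 * revocation. Runs from the inode work queue.
 */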
2131 static void ceph_do_invalidate_pages(struct inode *inode)
2132 {
2133 	struct ceph_client *cl = ceph_inode_to_client(inode);
2134 	struct ceph_inode_info *ci = ceph_inode(inode);
2135 	u32 orig_gen;
2136 	int check = 0;
2137 
2138 	ceph_fscache_invalidate(inode, false);
2139 
2140 	mutex_lock(&ci->i_truncate_mutex);
2141 
2142 	if (ceph_inode_is_shutdown(inode)) {
2143 		pr_warn_ratelimited_client(cl,
2144 			"%p %llx.%llx is shut down\n", inode,
2145 			ceph_vinop(inode));
2146 		mapping_set_error(inode->i_mapping, -EIO);
2147 		truncate_pagecache(inode, 0);
2148 		mutex_unlock(&ci->i_truncate_mutex);
2149 		goto out;
2150 	}
2151 
2152 	spin_lock(&ci->i_ceph_lock);
2153 	doutc(cl, "%p %llx.%llx gen %d revoking %d\n", inode,
2154 	      ceph_vinop(inode), ci->i_rdcache_gen, ci->i_rdcache_revoking);
2155 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2156 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
2157 			check = 1;
2158 		spin_unlock(&ci->i_ceph_lock);
2159 		mutex_unlock(&ci->i_truncate_mutex);
2160 		goto out;
2161 	}
2162 	orig_gen = ci->i_rdcache_gen;
2163 	spin_unlock(&ci->i_ceph_lock);
2164 
2165 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
2166 		pr_err_client(cl, "invalidate_inode_pages2 %llx.%llx failed\n",
2167 			      ceph_vinop(inode));
2168 	}
2169 
2170 	spin_lock(&ci->i_ceph_lock);
2171 	if (orig_gen == ci->i_rdcache_gen &&
2172 	    orig_gen == ci->i_rdcache_revoking) {
2173 		doutc(cl, "%p %llx.%llx gen %d successful\n", inode,
2174 		      ceph_vinop(inode), ci->i_rdcache_gen);
2175 		ci->i_rdcache_revoking--;
2176 		check = 1;
2177 	} else {
2178 		doutc(cl, "%p %llx.%llx gen %d raced, now %d revoking %d\n",
2179 		      inode, ceph_vinop(inode), orig_gen, ci->i_rdcache_gen,
2180 		      ci->i_rdcache_revoking);
2181 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
2182 			check = 1;
2183 	}
2184 	spin_unlock(&ci->i_ceph_lock);
2185 	mutex_unlock(&ci->i_truncate_mutex);
2186 out:
2187 	if (check)
2188 		ceph_check_caps(ci, 0);
2189 }
2190 
2191 /*
2192  * Make sure any pending truncation is applied before doing anything
2193  * that may depend on it.
2194  */
2195 void __ceph_do_pending_vmtruncate(struct inode *inode)
2196 {
2197 	struct ceph_client *cl = ceph_inode_to_client(inode);
2198 	struct ceph_inode_info *ci = ceph_inode(inode);
2199 	u64 to;
2200 	int wrbuffer_refs, finish = 0;
2201 
2202 	mutex_lock(&ci->i_truncate_mutex);
2203 retry:
2204 	spin_lock(&ci->i_ceph_lock);
2205 	if (ci->i_truncate_pending == 0) {
2206 		doutc(cl, "%p %llx.%llx none pending\n", inode,
2207 		      ceph_vinop(inode));
2208 		spin_unlock(&ci->i_ceph_lock);
2209 		mutex_unlock(&ci->i_truncate_mutex);
2210 		return;
2211 	}
2212 
2213 	/*
2214 	 * make sure any dirty snapped pages are flushed before we
2215 	 * possibly truncate them.. so write AND block!
2216 	 */
2217 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
2218 		spin_unlock(&ci->i_ceph_lock);
2219 		doutc(cl, "%p %llx.%llx flushing snaps first\n", inode,
2220 		      ceph_vinop(inode));
2221 		filemap_write_and_wait_range(&inode->i_data, 0,
2222 					     inode->i_sb->s_maxbytes);
2223 		goto retry;
2224 	}
2225 
2226 	/* there should be no reader or writer */
2227 	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
2228 
2229 	to = ci->i_truncate_pagecache_size;
2230 	wrbuffer_refs = ci->i_wrbuffer_ref;
2231 	doutc(cl, "%p %llx.%llx (%d) to %lld\n", inode, ceph_vinop(inode),
2232 	      ci->i_truncate_pending, to);
2233 	spin_unlock(&ci->i_ceph_lock);
2234 
2235 	ceph_fscache_resize(inode, to);
2236 	truncate_pagecache(inode, to);
2237 
2238 	spin_lock(&ci->i_ceph_lock);
2239 	if (to == ci->i_truncate_pagecache_size) {
2240 		ci->i_truncate_pending = 0;
2241 		finish = 1;
2242 	}
2243 	spin_unlock(&ci->i_ceph_lock);
2244 	if (!finish)
2245 		goto retry;
2246 
2247 	mutex_unlock(&ci->i_truncate_mutex);
2248 
2249 	if (wrbuffer_refs == 0)
2250 		ceph_check_caps(ci, 0);
2251 
2252 	wake_up_all(&ci->i_cap_wq);
2253 }
2254 
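/*
 * Worker for the inode_wq: handle whatever work bits were set by
 * ceph_queue_inode_work() (writeback, pagecache invalidation, pending
 * vmtruncate, cap checks and snap flushes), then drop the inode
 * reference taken when the work was queued.
 */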
2255 static void ceph_inode_work(struct work_struct *work)
2256 {
2257 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
2258 						 i_work);
2259 	struct inode *inode = &ci->netfs.inode;
2260 	struct ceph_client *cl = ceph_inode_to_client(inode);
2261 
2262 	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
2263 		doutc(cl, "writeback %p %llx.%llx\n", inode, ceph_vinop(inode));
2264 		filemap_fdatawrite(&inode->i_data);
2265 	}
2266 	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
2267 		ceph_do_invalidate_pages(inode);
2268 
2269 	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
2270 		__ceph_do_pending_vmtruncate(inode);
2271 
2272 	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
2273 		ceph_check_caps(ci, 0);
2274 
2275 	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
2276 		ceph_flush_snaps(ci, NULL);
2277 
2278 	iput(inode);
2279 }
2280 
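/*
 * get_link for encrypted symlinks. Symlink decryption may sleep, so
 * RCU-walk lookups (no dentry) are punted back to ref-walk mode.
 */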
2281 static const char *ceph_encrypted_get_link(struct dentry *dentry,
2282 					   struct inode *inode,
2283 					   struct delayed_call *done)
2284 {
2285 	struct ceph_inode_info *ci = ceph_inode(inode);
2286 
2287 	if (!dentry)
2288 		return ERR_PTR(-ECHILD);
2289 
2290 	return fscrypt_get_symlink(inode, ci->i_symlink, i_size_read(inode),
2291 				   done);
2292 }
2293 
2294 static int ceph_encrypted_symlink_getattr(struct mnt_idmap *idmap,
2295 					  const struct path *path,
2296 					  struct kstat *stat, u32 request_mask,
2297 					  unsigned int query_flags)
2298 {
2299 	int ret;
2300 
2301 	ret = ceph_getattr(idmap, path, stat, request_mask, query_flags);
2302 	if (ret)
2303 		return ret;
2304 	return fscrypt_symlink_getattr(path, stat);
2305 }
2306 
2307 /*
2308  * symlinks
2309  */
2310 static const struct inode_operations ceph_symlink_iops = {
2311 	.get_link = simple_get_link,
2312 	.setattr = ceph_setattr,
2313 	.getattr = ceph_getattr,
2314 	.listxattr = ceph_listxattr,
2315 };
2316 
2317 static const struct inode_operations ceph_encrypted_symlink_iops = {
2318 	.get_link = ceph_encrypted_get_link,
2319 	.setattr = ceph_setattr,
2320 	.getattr = ceph_encrypted_symlink_getattr,
2321 	.listxattr = ceph_listxattr,
2322 };
2323 
2324 /*
2325  * Transfer the encrypted last block to the MDS so it can update
2326  * that block when truncating to a smaller size.
2327  *
2328  * We don't support a PAGE_SIZE that is smaller than
2329  * CEPH_FSCRYPT_BLOCK_SIZE.
2330  */
2331 static int fill_fscrypt_truncate(struct inode *inode,
2332 				 struct ceph_mds_request *req,
2333 				 struct iattr *attr)
2334 {
2335 	struct ceph_client *cl = ceph_inode_to_client(inode);
2336 	struct ceph_inode_info *ci = ceph_inode(inode);
2337 	int boff = attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE;
2338 	loff_t pos, orig_pos = round_down(attr->ia_size,
2339 					  CEPH_FSCRYPT_BLOCK_SIZE);
2340 	u64 block = orig_pos >> CEPH_FSCRYPT_BLOCK_SHIFT;
2341 	struct ceph_pagelist *pagelist = NULL;
2342 	struct kvec iov = {0};
2343 	struct iov_iter iter;
2344 	struct page *page = NULL;
2345 	struct ceph_fscrypt_truncate_size_header header;
2346 	int retry_op = 0;
2347 	int len = CEPH_FSCRYPT_BLOCK_SIZE;
2348 	loff_t i_size = i_size_read(inode);
2349 	int got, ret, issued;
2350 	u64 objver;
2351 
2352 	ret = __ceph_get_caps(inode, NULL, CEPH_CAP_FILE_RD, 0, -1, &got);
2353 	if (ret < 0)
2354 		return ret;
2355 
2356 	issued = __ceph_caps_issued(ci, NULL);
2357 
2358 	doutc(cl, "size %lld -> %lld got cap refs on %s, issued %s\n",
2359 	      i_size, attr->ia_size, ceph_cap_string(got),
2360 	      ceph_cap_string(issued));
2361 
2362 	/* Try to writeback the dirty pagecaches */
2363 	if (issued & (CEPH_CAP_FILE_BUFFER)) {
2364 		loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SIZE - 1;
2365 
2366 		ret = filemap_write_and_wait_range(inode->i_mapping,
2367 						   orig_pos, lend);
2368 		if (ret < 0)
2369 			goto out;
2370 	}
2371 
2372 	page = __page_cache_alloc(GFP_KERNEL);
2373 	if (page == NULL) {
2374 		ret = -ENOMEM;
2375 		goto out;
2376 	}
2377 
2378 	pagelist = ceph_pagelist_alloc(GFP_KERNEL);
2379 	if (!pagelist) {
2380 		ret = -ENOMEM;
2381 		goto out;
2382 	}
2383 
2384 	iov.iov_base = kmap_local_page(page);
2385 	iov.iov_len = len;
2386 	iov_iter_kvec(&iter, ITER_DEST, &iov, 1, len);
2387 
2388 	pos = orig_pos;
2389 	ret = __ceph_sync_read(inode, &pos, &iter, &retry_op, &objver);
2390 	if (ret < 0)
2391 		goto out;
2392 
2393 	/* Insert the header first */
2394 	header.ver = 1;
2395 	header.compat = 1;
2396 	header.change_attr = cpu_to_le64(inode_peek_iversion_raw(inode));
2397 
2398 	/*
2399 	 * Always set the block_size to CEPH_FSCRYPT_BLOCK_SIZE,
2400 	 * because the MDS may need it to do the truncate.
2401 	 */
2402 	header.block_size = cpu_to_le32(CEPH_FSCRYPT_BLOCK_SIZE);
2403 
2404 	/*
2405 	 * If we hit a hole here, just skip filling the fscrypt
2406 	 * payload for the request: once fscrypt is enabled, the
2407 	 * file is split into blocks of CEPH_FSCRYPT_BLOCK_SIZE,
2408 	 * and any hole must be a multiple of the block size, so
2409 	 * there is no partial last block that the MDS would need
2410 	 * to rewrite.
2411 	 *
2412 	 * If the RADOS object doesn't exist, objver will be 0.
2413 	 */
2414 	if (!objver) {
2415 		doutc(cl, "hit hole, ppos %lld < size %lld\n", pos, i_size);
2416 
2417 		header.data_len = cpu_to_le32(8 + 8 + 4);
2418 		header.file_offset = 0;
2419 		ret = 0;
2420 	} else {
2421 		header.data_len = cpu_to_le32(8 + 8 + 4 + CEPH_FSCRYPT_BLOCK_SIZE);
2422 		header.file_offset = cpu_to_le64(orig_pos);
2423 
2424 		doutc(cl, "encrypt block boff/bsize %d/%lu\n", boff,
2425 		      CEPH_FSCRYPT_BLOCK_SIZE);
2426 
2427 		/* truncate and zero out the extra contents for the last block */
2428 		memset(iov.iov_base + boff, 0, PAGE_SIZE - boff);
2429 
2430 		/* encrypt the last block */
2431 		ret = ceph_fscrypt_encrypt_block_inplace(inode, page,
2432 						    CEPH_FSCRYPT_BLOCK_SIZE,
2433 						    0, block,
2434 						    GFP_KERNEL);
2435 		if (ret)
2436 			goto out;
2437 	}
2438 
2439 	/* Insert the header */
2440 	ret = ceph_pagelist_append(pagelist, &header, sizeof(header));
2441 	if (ret)
2442 		goto out;
2443 
2444 	if (header.block_size) {
2445 		/* Append the last block contents to pagelist */
2446 		ret = ceph_pagelist_append(pagelist, iov.iov_base,
2447 					   CEPH_FSCRYPT_BLOCK_SIZE);
2448 		if (ret)
2449 			goto out;
2450 	}
2451 	req->r_pagelist = pagelist;
2452 out:
2453 	doutc(cl, "%p %llx.%llx size dropping cap refs on %s\n", inode,
2454 	      ceph_vinop(inode), ceph_cap_string(got));
2455 	ceph_put_cap_refs(ci, got);
2456 	if (iov.iov_base)
2457 		kunmap_local(iov.iov_base);
2458 	if (page)
2459 		__free_pages(page, 0);
2460 	if (ret && pagelist)
2461 		ceph_pagelist_release(pagelist);
2462 	return ret;
2463 }
2464 
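/*
 * Apply attribute changes to an inode. Changes covered by caps we hold
 * exclusively are applied locally and marked dirty; everything else is
 * sent to the MDS in a SETATTR request. For encrypted inodes, a
 * truncate to a size that isn't fscrypt-block aligned also ships the
 * re-encrypted last block via fill_fscrypt_truncate().
 */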
2465 int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
2466 		   struct iattr *attr, struct ceph_iattr *cia)
2467 {
2468 	struct ceph_inode_info *ci = ceph_inode(inode);
2469 	unsigned int ia_valid = attr->ia_valid;
2470 	struct ceph_mds_request *req;
2471 	struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
2472 	struct ceph_client *cl = ceph_inode_to_client(inode);
2473 	struct ceph_cap_flush *prealloc_cf;
2474 	loff_t isize = i_size_read(inode);
2475 	int issued;
2476 	int release = 0, dirtied = 0;
2477 	int mask = 0;
2478 	int err = 0;
2479 	int inode_dirty_flags = 0;
2480 	bool lock_snap_rwsem = false;
2481 	bool fill_fscrypt;
2482 	int truncate_retry = 20; /* The RMW will take around 50ms */
2483 
2484 retry:
2485 	prealloc_cf = ceph_alloc_cap_flush();
2486 	if (!prealloc_cf)
2487 		return -ENOMEM;
2488 
2489 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2490 				       USE_AUTH_MDS);
2491 	if (IS_ERR(req)) {
2492 		ceph_free_cap_flush(prealloc_cf);
2493 		return PTR_ERR(req);
2494 	}
2495 
2496 	fill_fscrypt = false;
2497 	spin_lock(&ci->i_ceph_lock);
2498 	issued = __ceph_caps_issued(ci, NULL);
2499 
2500 	if (!ci->i_head_snapc &&
2501 	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2502 		lock_snap_rwsem = true;
2503 		if (!down_read_trylock(&mdsc->snap_rwsem)) {
2504 			spin_unlock(&ci->i_ceph_lock);
2505 			down_read(&mdsc->snap_rwsem);
2506 			spin_lock(&ci->i_ceph_lock);
2507 			issued = __ceph_caps_issued(ci, NULL);
2508 		}
2509 	}
2510 
2511 	doutc(cl, "%p %llx.%llx issued %s\n", inode, ceph_vinop(inode),
2512 	      ceph_cap_string(issued));
2513 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
2514 	if (cia && cia->fscrypt_auth) {
2515 		u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth);
2516 
2517 		if (len > sizeof(*cia->fscrypt_auth)) {
2518 			err = -EINVAL;
2519 			spin_unlock(&ci->i_ceph_lock);
2520 			goto out;
2521 		}
2522 
2523 		doutc(cl, "%p %llx.%llx fscrypt_auth len %u to %u)\n", inode,
2524 		      ceph_vinop(inode), ci->fscrypt_auth_len, len);
2525 
2526 		/* It should never be re-set once set */
2527 		WARN_ON_ONCE(ci->fscrypt_auth);
2528 
2529 		if (issued & CEPH_CAP_AUTH_EXCL) {
2530 			dirtied |= CEPH_CAP_AUTH_EXCL;
2531 			kfree(ci->fscrypt_auth);
2532 			ci->fscrypt_auth = (u8 *)cia->fscrypt_auth;
2533 			ci->fscrypt_auth_len = len;
2534 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2535 			   ci->fscrypt_auth_len != len ||
2536 			   memcmp(ci->fscrypt_auth, cia->fscrypt_auth, len)) {
2537 			req->r_fscrypt_auth = cia->fscrypt_auth;
2538 			mask |= CEPH_SETATTR_FSCRYPT_AUTH;
2539 			release |= CEPH_CAP_AUTH_SHARED;
2540 		}
2541 		cia->fscrypt_auth = NULL;
2542 	}
2543 #else
2544 	if (cia && cia->fscrypt_auth) {
2545 		err = -EINVAL;
2546 		spin_unlock(&ci->i_ceph_lock);
2547 		goto out;
2548 	}
2549 #endif /* CONFIG_FS_ENCRYPTION */
2550 
2551 	if (ia_valid & ATTR_UID) {
2552 		kuid_t fsuid = from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid);
2553 
2554 		doutc(cl, "%p %llx.%llx uid %d -> %d\n", inode,
2555 		      ceph_vinop(inode),
2556 		      from_kuid(&init_user_ns, inode->i_uid),
2557 		      from_kuid(&init_user_ns, attr->ia_uid));
2558 		if (issued & CEPH_CAP_AUTH_EXCL) {
2559 			inode->i_uid = fsuid;
2560 			dirtied |= CEPH_CAP_AUTH_EXCL;
2561 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2562 			   !uid_eq(fsuid, inode->i_uid)) {
2563 			req->r_args.setattr.uid = cpu_to_le32(
2564 				from_kuid(&init_user_ns, fsuid));
2565 			mask |= CEPH_SETATTR_UID;
2566 			release |= CEPH_CAP_AUTH_SHARED;
2567 		}
2568 	}
2569 	if (ia_valid & ATTR_GID) {
2570 		kgid_t fsgid = from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid);
2571 
2572 		doutc(cl, "%p %llx.%llx gid %d -> %d\n", inode,
2573 		      ceph_vinop(inode),
2574 		      from_kgid(&init_user_ns, inode->i_gid),
2575 		      from_kgid(&init_user_ns, attr->ia_gid));
2576 		if (issued & CEPH_CAP_AUTH_EXCL) {
2577 			inode->i_gid = fsgid;
2578 			dirtied |= CEPH_CAP_AUTH_EXCL;
2579 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2580 			   !gid_eq(fsgid, inode->i_gid)) {
2581 			req->r_args.setattr.gid = cpu_to_le32(
2582 				from_kgid(&init_user_ns, fsgid));
2583 			mask |= CEPH_SETATTR_GID;
2584 			release |= CEPH_CAP_AUTH_SHARED;
2585 		}
2586 	}
2587 	if (ia_valid & ATTR_MODE) {
2588 		doutc(cl, "%p %llx.%llx mode 0%o -> 0%o\n", inode,
2589 		      ceph_vinop(inode), inode->i_mode, attr->ia_mode);
2590 		if (issued & CEPH_CAP_AUTH_EXCL) {
2591 			inode->i_mode = attr->ia_mode;
2592 			dirtied |= CEPH_CAP_AUTH_EXCL;
2593 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2594 			   attr->ia_mode != inode->i_mode) {
2595 			inode->i_mode = attr->ia_mode;
2596 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2597 			mask |= CEPH_SETATTR_MODE;
2598 			release |= CEPH_CAP_AUTH_SHARED;
2599 		}
2600 	}
2601 
2602 	if (ia_valid & ATTR_ATIME) {
2603 		struct timespec64 atime = inode_get_atime(inode);
2604 
2605 		doutc(cl, "%p %llx.%llx atime %lld.%09ld -> %lld.%09ld\n",
2606 		      inode, ceph_vinop(inode),
2607 		      atime.tv_sec, atime.tv_nsec,
2608 		      attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2609 		if (issued & CEPH_CAP_FILE_EXCL) {
2610 			ci->i_time_warp_seq++;
2611 			inode_set_atime_to_ts(inode, attr->ia_atime);
2612 			dirtied |= CEPH_CAP_FILE_EXCL;
2613 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2614 			   timespec64_compare(&atime,
2615 					      &attr->ia_atime) < 0) {
2616 			inode_set_atime_to_ts(inode, attr->ia_atime);
2617 			dirtied |= CEPH_CAP_FILE_WR;
2618 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2619 			   !timespec64_equal(&atime, &attr->ia_atime)) {
2620 			ceph_encode_timespec64(&req->r_args.setattr.atime,
2621 					       &attr->ia_atime);
2622 			mask |= CEPH_SETATTR_ATIME;
2623 			release |= CEPH_CAP_FILE_SHARED |
2624 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2625 		}
2626 	}
2627 	if (ia_valid & ATTR_SIZE) {
2628 		doutc(cl, "%p %llx.%llx size %lld -> %lld\n", inode,
2629 		      ceph_vinop(inode), isize, attr->ia_size);
2630 		/*
2631 		 * The RMW is only needed when the new size is smaller and
2632 		 * not aligned to CEPH_FSCRYPT_BLOCK_SIZE.
2633 		 */
2634 		if (IS_ENCRYPTED(inode) && attr->ia_size < isize &&
2635 		    (attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE)) {
2636 			mask |= CEPH_SETATTR_SIZE;
2637 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2638 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2639 			set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
2640 			mask |= CEPH_SETATTR_FSCRYPT_FILE;
2641 			req->r_args.setattr.size =
2642 				cpu_to_le64(round_up(attr->ia_size,
2643 						     CEPH_FSCRYPT_BLOCK_SIZE));
2644 			req->r_args.setattr.old_size =
2645 				cpu_to_le64(round_up(isize,
2646 						     CEPH_FSCRYPT_BLOCK_SIZE));
2647 			req->r_fscrypt_file = attr->ia_size;
2648 			fill_fscrypt = true;
2649 		} else if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
2650 			if (attr->ia_size > isize) {
2651 				i_size_write(inode, attr->ia_size);
2652 				inode->i_blocks = calc_inode_blocks(attr->ia_size);
2653 				ci->i_reported_size = attr->ia_size;
2654 				dirtied |= CEPH_CAP_FILE_EXCL;
2655 				ia_valid |= ATTR_MTIME;
2656 			}
2657 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2658 			   attr->ia_size != isize) {
2659 			mask |= CEPH_SETATTR_SIZE;
2660 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2661 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2662 			if (IS_ENCRYPTED(inode) && attr->ia_size) {
2663 				set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
2664 				mask |= CEPH_SETATTR_FSCRYPT_FILE;
2665 				req->r_args.setattr.size =
2666 					cpu_to_le64(round_up(attr->ia_size,
2667 							     CEPH_FSCRYPT_BLOCK_SIZE));
2668 				req->r_args.setattr.old_size =
2669 					cpu_to_le64(round_up(isize,
2670 							     CEPH_FSCRYPT_BLOCK_SIZE));
2671 				req->r_fscrypt_file = attr->ia_size;
2672 			} else {
2673 				req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2674 				req->r_args.setattr.old_size = cpu_to_le64(isize);
2675 				req->r_fscrypt_file = 0;
2676 			}
2677 		}
2678 	}
2679 	if (ia_valid & ATTR_MTIME) {
2680 		struct timespec64 mtime = inode_get_mtime(inode);
2681 
2682 		doutc(cl, "%p %llx.%llx mtime %lld.%09ld -> %lld.%09ld\n",
2683 		      inode, ceph_vinop(inode),
2684 		      mtime.tv_sec, mtime.tv_nsec,
2685 		      attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2686 		if (issued & CEPH_CAP_FILE_EXCL) {
2687 			ci->i_time_warp_seq++;
2688 			inode_set_mtime_to_ts(inode, attr->ia_mtime);
2689 			dirtied |= CEPH_CAP_FILE_EXCL;
2690 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2691 			   timespec64_compare(&mtime, &attr->ia_mtime) < 0) {
2692 			inode_set_mtime_to_ts(inode, attr->ia_mtime);
2693 			dirtied |= CEPH_CAP_FILE_WR;
2694 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2695 			   !timespec64_equal(&mtime, &attr->ia_mtime)) {
2696 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
2697 					       &attr->ia_mtime);
2698 			mask |= CEPH_SETATTR_MTIME;
2699 			release |= CEPH_CAP_FILE_SHARED |
2700 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2701 		}
2702 	}
2703 
2704 	/* these do nothing */
2705 	if (ia_valid & ATTR_CTIME) {
2706 		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2707 					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2708 		doutc(cl, "%p %llx.%llx ctime %lld.%09ld -> %lld.%09ld (%s)\n",
2709 		      inode, ceph_vinop(inode),
2710 		      inode_get_ctime_sec(inode),
2711 		      inode_get_ctime_nsec(inode),
2712 		      attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2713 		      only ? "ctime only" : "ignored");
2714 		if (only) {
2715 			/*
2716 			 * if the kernel wants to dirty ctime but nothing else,
2717 			 * we need to choose a cap to dirty under, or do
2718 			 * an almost-no-op setattr
2719 			 */
2720 			if (issued & CEPH_CAP_AUTH_EXCL)
2721 				dirtied |= CEPH_CAP_AUTH_EXCL;
2722 			else if (issued & CEPH_CAP_FILE_EXCL)
2723 				dirtied |= CEPH_CAP_FILE_EXCL;
2724 			else if (issued & CEPH_CAP_XATTR_EXCL)
2725 				dirtied |= CEPH_CAP_XATTR_EXCL;
2726 			else
2727 				mask |= CEPH_SETATTR_CTIME;
2728 		}
2729 	}
2730 	if (ia_valid & ATTR_FILE)
2731 		doutc(cl, "%p %llx.%llx ATTR_FILE ... hrm!\n", inode,
2732 		      ceph_vinop(inode));
2733 
2734 	if (dirtied) {
2735 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2736 							   &prealloc_cf);
2737 		inode_set_ctime_to_ts(inode, attr->ia_ctime);
2738 		inode_inc_iversion_raw(inode);
2739 	}
2740 
2741 	release &= issued;
2742 	spin_unlock(&ci->i_ceph_lock);
2743 	if (lock_snap_rwsem) {
2744 		up_read(&mdsc->snap_rwsem);
2745 		lock_snap_rwsem = false;
2746 	}
2747 
2748 	if (inode_dirty_flags)
2749 		__mark_inode_dirty(inode, inode_dirty_flags);
2750 
2751 	if (mask) {
2752 		req->r_inode = inode;
2753 		ihold(inode);
2754 		req->r_inode_drop = release;
2755 		req->r_args.setattr.mask = cpu_to_le32(mask);
2756 		req->r_num_caps = 1;
2757 		req->r_stamp = attr->ia_ctime;
2758 		if (fill_fscrypt) {
2759 			err = fill_fscrypt_truncate(inode, req, attr);
2760 			if (err)
2761 				goto out;
2762 		}
2763 
2764 		/*
2765 		 * The truncate request will return -EAGAIN when the
2766 		 * last block has been updated just before the MDS
2767 		 * successfully gets the xlock for the FILE lock. To
2768 		 * avoid corrupting the file contents we need to retry
2769 		 * it.
2770 		 */
2771 		err = ceph_mdsc_do_request(mdsc, NULL, req);
2772 		if (err == -EAGAIN && truncate_retry--) {
2773 			doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote), retry it!\n",
2774 			      inode, ceph_vinop(inode), err,
2775 			      ceph_cap_string(dirtied), mask);
2776 			ceph_mdsc_put_request(req);
2777 			ceph_free_cap_flush(prealloc_cf);
2778 			goto retry;
2779 		}
2780 	}
2781 out:
2782 	doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote)\n", inode,
2783 	      ceph_vinop(inode), err, ceph_cap_string(dirtied), mask);
2784 
2785 	ceph_mdsc_put_request(req);
2786 	ceph_free_cap_flush(prealloc_cf);
2787 
2788 	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2789 		__ceph_do_pending_vmtruncate(inode);
2790 
2791 	return err;
2792 }
2793 
2794 /*
2795  * setattr
2796  */
2797 int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
2798 		 struct iattr *attr)
2799 {
2800 	struct inode *inode = d_inode(dentry);
2801 	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2802 	int err;
2803 
2804 	if (ceph_snap(inode) != CEPH_NOSNAP)
2805 		return -EROFS;
2806 
2807 	if (ceph_inode_is_shutdown(inode))
2808 		return -ESTALE;
2809 
2810 	err = fscrypt_prepare_setattr(dentry, attr);
2811 	if (err)
2812 		return err;
2813 
2814 	err = setattr_prepare(idmap, dentry, attr);
2815 	if (err != 0)
2816 		return err;
2817 
2818 	if ((attr->ia_valid & ATTR_SIZE) &&
2819 	    attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
2820 		return -EFBIG;
2821 
2822 	if ((attr->ia_valid & ATTR_SIZE) &&
2823 	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2824 		return -EDQUOT;
2825 
2826 	err = __ceph_setattr(idmap, inode, attr, NULL);
2827 
2828 	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2829 		err = posix_acl_chmod(idmap, dentry, attr->ia_mode);
2830 
2831 	return err;
2832 }
2833 
2834 int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
2835 {
2836 	int issued = ceph_caps_issued(ceph_inode(inode));
2837 
2838 	/*
2839 	 * If any 'x' caps are issued we can just choose the auth MDS
2840 	 * instead of a random replica MDS, because a loner client can
2841 	 * only be granted 'x' caps while the Locker is in the LOCK_EXEC
2842 	 * state. If we send the getattr request to a replica MDS
2843 	 * instead, it must auth-pin and try to rdlock from the auth
2844 	 * MDS, which then has to transition the Locker state to
2845 	 * LOCK_SYNC; afterwards the lock state changes back again.
2846 	 *
2847 	 * This Locker state transition is expensive and usually
2848 	 * requires revoking caps from clients.
2849 	 *
2850 	 * For the 'Xs' caps used by getxattr we also choose the auth
2851 	 * MDS, because the MDS-side code is buggy: setxattr doesn't
2852 	 * notify the replica MDSes when values change, so a replica
2853 	 * MDS may return stale values. Though we will fix this in the
2854 	 * MDS code, it still makes sense for old ceph.
2855 	 */
2857 	if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
2858 	    || (mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR)))
2859 		return USE_AUTH_MDS;
2860 	else
2861 		return USE_ANY_MDS;
2862 }
2863 
2864 /*
2865  * Verify that we have a lease on the given mask.  If not,
2866  * do a getattr against an mds.
2867  */
2868 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2869 		      int mask, bool force)
2870 {
2871 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
2872 	struct ceph_client *cl = fsc->client;
2873 	struct ceph_mds_client *mdsc = fsc->mdsc;
2874 	struct ceph_mds_request *req;
2875 	int mode;
2876 	int err;
2877 
2878 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
2879 		doutc(cl, "inode %p %llx.%llx SNAPDIR\n", inode,
2880 		      ceph_vinop(inode));
2881 		return 0;
2882 	}
2883 
2884 	doutc(cl, "inode %p %llx.%llx mask %s mode 0%o\n", inode,
2885 	      ceph_vinop(inode), ceph_cap_string(mask), inode->i_mode);
2886 	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2887 			return 0;
2888 
2889 	mode = ceph_try_to_choose_auth_mds(inode, mask);
2890 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2891 	if (IS_ERR(req))
2892 		return PTR_ERR(req);
2893 	req->r_inode = inode;
2894 	ihold(inode);
2895 	req->r_num_caps = 1;
2896 	req->r_args.getattr.mask = cpu_to_le32(mask);
2897 	req->r_locked_page = locked_page;
2898 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2899 	if (locked_page && err == 0) {
2900 		u64 inline_version = req->r_reply_info.targeti.inline_version;
2901 		if (inline_version == 0) {
2902 			/* the reply is supposed to contain inline data */
2903 			err = -EINVAL;
2904 		} else if (inline_version == CEPH_INLINE_NONE ||
2905 			   inline_version == 1) {
2906 			err = -ENODATA;
2907 		} else {
2908 			err = req->r_reply_info.targeti.inline_len;
2909 		}
2910 	}
2911 	ceph_mdsc_put_request(req);
2912 	doutc(cl, "result=%d\n", err);
2913 	return err;
2914 }
2915 
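/*
 * Fetch a ceph virtual xattr directly from the MDS (GETVXATTR).
 * Returns the value length on success; the value is copied into
 * @value only if @size is non-zero and large enough.
 */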
2916 int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
2917 		      size_t size)
2918 {
2919 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
2920 	struct ceph_client *cl = fsc->client;
2921 	struct ceph_mds_client *mdsc = fsc->mdsc;
2922 	struct ceph_mds_request *req;
2923 	int mode = USE_AUTH_MDS;
2924 	int err;
2925 	char *xattr_value;
2926 	size_t xattr_value_len;
2927 
2928 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode);
2929 	if (IS_ERR(req)) {
2930 		err = -ENOMEM;
2931 		goto out;
2932 	}
2933 
2934 	req->r_feature_needed = CEPHFS_FEATURE_OP_GETVXATTR;
2935 	req->r_path2 = kstrdup(name, GFP_NOFS);
2936 	if (!req->r_path2) {
2937 		err = -ENOMEM;
2938 		goto put;
2939 	}
2940 
2941 	ihold(inode);
2942 	req->r_inode = inode;
2943 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2944 	if (err < 0)
2945 		goto put;
2946 
2947 	xattr_value = req->r_reply_info.xattr_info.xattr_value;
2948 	xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
2949 
2950 	doutc(cl, "xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
2951 
2952 	err = (int)xattr_value_len;
2953 	if (size == 0)
2954 		goto put;
2955 
2956 	if (xattr_value_len > size) {
2957 		err = -ERANGE;
2958 		goto put;
2959 	}
2960 
2961 	memcpy(value, xattr_value, xattr_value_len);
2962 put:
2963 	ceph_mdsc_put_request(req);
2964 out:
2965 	doutc(cl, "result=%d\n", err);
2966 	return err;
2967 }
2968 
2969 
2970 /*
2971  * Check inode permissions.  We verify we have a valid value for
2972  * the AUTH cap, then call the generic handler.
2973  */
2974 int ceph_permission(struct mnt_idmap *idmap, struct inode *inode,
2975 		    int mask)
2976 {
2977 	int err;
2978 
2979 	if (mask & MAY_NOT_BLOCK)
2980 		return -ECHILD;
2981 
2982 	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2983 
2984 	if (!err)
2985 		err = generic_permission(idmap, inode, mask);
2986 	return err;
2987 }
2988 
2989 /* Craft a mask of needed caps given a set of requested statx attrs. */
2990 static int statx_to_caps(u32 want, umode_t mode)
2991 {
2992 	int mask = 0;
2993 
2994 	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME|STATX_CHANGE_COOKIE))
2995 		mask |= CEPH_CAP_AUTH_SHARED;
2996 
2997 	if (want & (STATX_NLINK|STATX_CTIME|STATX_CHANGE_COOKIE)) {
2998 		/*
2999 		 * The link count for directories depends on inode->i_subdirs,
3000 		 * and that is only updated when Fs caps are held.
3001 		 */
3002 		if (S_ISDIR(mode))
3003 			mask |= CEPH_CAP_FILE_SHARED;
3004 		else
3005 			mask |= CEPH_CAP_LINK_SHARED;
3006 	}
3007 
3008 	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|STATX_BLOCKS|STATX_CHANGE_COOKIE))
3009 		mask |= CEPH_CAP_FILE_SHARED;
3010 
3011 	if (want & (STATX_CTIME|STATX_CHANGE_COOKIE))
3012 		mask |= CEPH_CAP_XATTR_SHARED;
3013 
3014 	return mask;
3015 }
3016 
3017 /*
3018  * Get all the attributes. If we have sufficient caps for the requested attrs,
3019  * then we can avoid talking to the MDS at all.
3020  */
3021 int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
3022 		 struct kstat *stat, u32 request_mask, unsigned int flags)
3023 {
3024 	struct inode *inode = d_inode(path->dentry);
3025 	struct super_block *sb = inode->i_sb;
3026 	struct ceph_inode_info *ci = ceph_inode(inode);
3027 	u32 valid_mask = STATX_BASIC_STATS;
3028 	int err = 0;
3029 
3030 	if (ceph_inode_is_shutdown(inode))
3031 		return -ESTALE;
3032 
3033 	/* Skip the getattr altogether if we're asked not to sync */
3034 	if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
3035 		err = ceph_do_getattr(inode,
3036 				statx_to_caps(request_mask, inode->i_mode),
3037 				flags & AT_STATX_FORCE_SYNC);
3038 		if (err)
3039 			return err;
3040 	}
3041 
3042 	generic_fillattr(idmap, request_mask, inode, stat);
3043 	stat->ino = ceph_present_inode(inode);
3044 
3045 	/*
3046 	 * btime on newly-allocated inodes is 0, so if this is still set to
3047 	 * that, then assume that it's not valid.
3048 	 */
3049 	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
3050 		stat->btime = ci->i_btime;
3051 		valid_mask |= STATX_BTIME;
3052 	}
3053 
3054 	if (request_mask & STATX_CHANGE_COOKIE) {
3055 		stat->change_cookie = inode_peek_iversion_raw(inode);
3056 		valid_mask |= STATX_CHANGE_COOKIE;
3057 	}
3058 
3059 	if (ceph_snap(inode) == CEPH_NOSNAP)
3060 		stat->dev = sb->s_dev;
3061 	else
3062 		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
3063 
3064 	if (S_ISDIR(inode->i_mode)) {
3065 		if (ceph_test_mount_opt(ceph_sb_to_fs_client(sb), RBYTES)) {
3066 			stat->size = ci->i_rbytes;
3067 		} else if (ceph_snap(inode) == CEPH_SNAPDIR) {
3068 			struct ceph_inode_info *pci;
3069 			struct ceph_snap_realm *realm;
3070 			struct inode *parent;
3071 
3072 			parent = ceph_lookup_inode(sb, ceph_ino(inode));
3073 			if (IS_ERR(parent))
3074 				return PTR_ERR(parent);
3075 
3076 			pci = ceph_inode(parent);
3077 			spin_lock(&pci->i_ceph_lock);
3078 			realm = pci->i_snap_realm;
3079 			if (realm)
3080 				stat->size = realm->num_snaps;
3081 			else
3082 				stat->size = 0;
3083 			spin_unlock(&pci->i_ceph_lock);
3084 			iput(parent);
3085 		} else {
3086 			stat->size = ci->i_files + ci->i_subdirs;
3087 		}
3088 		stat->blocks = 0;
3089 		stat->blksize = 65536;
3090 		/*
3091 		 * Some applications rely on the st_nlink value of
3092 		 * directories being either 0 (if unlinked) or
3093 		 * 2 + number of subdirectories.
3094 		 */
3095 		if (stat->nlink == 1)
3096 			/* '.' + '..' + subdirs */
3097 			stat->nlink = 1 + 1 + ci->i_subdirs;
3098 	}
3099 
3100 	stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
3101 	if (IS_ENCRYPTED(inode))
3102 		stat->attributes |= STATX_ATTR_ENCRYPTED;
3103 	stat->attributes_mask |= (STATX_ATTR_CHANGE_MONOTONIC |
3104 				  STATX_ATTR_ENCRYPTED);
3105 
3106 	stat->result_mask = request_mask & valid_mask;
3107 	return err;
3108 }
3109 
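/*
 * Mark an inode as shut down (CEPH_I_SHUTDOWN) and purge all of its
 * caps, queueing a pagecache invalidation and dropping any inode
 * references the purged caps held.
 */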
3110 void ceph_inode_shutdown(struct inode *inode)
3111 {
3112 	struct ceph_inode_info *ci = ceph_inode(inode);
3113 	struct rb_node *p;
3114 	int iputs = 0;
3115 	bool invalidate = false;
3116 
3117 	spin_lock(&ci->i_ceph_lock);
3118 	ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
3119 	p = rb_first(&ci->i_caps);
3120 	while (p) {
3121 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
3122 
3123 		p = rb_next(p);
3124 		iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
3125 	}
3126 	spin_unlock(&ci->i_ceph_lock);
3127 
3128 	if (invalidate)
3129 		ceph_queue_invalidate(inode);
3130 	while (iputs--)
3131 		iput(inode);
3132 }
3133