xref: /linux/fs/ceph/inode.c (revision 9d0ad045533ee37a208991ac5baaf6641e60a9ed)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/module.h>
5 #include <linux/fs.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
17 #include <linux/fscrypt.h>
18 
19 #include "super.h"
20 #include "mds_client.h"
21 #include "cache.h"
22 #include "crypto.h"
23 #include <linux/ceph/decode.h>
24 
25 /*
26  * Ceph inode operations
27  *
28  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
29  * setattr, etc.), xattr helpers, and helpers for assimilating
30  * metadata returned by the MDS into our cache.
31  *
32  * Also define helpers for doing asynchronous writeback, invalidation,
33  * and truncation for the benefit of those who can't afford to block
34  * (typically because they are in the message handler path).
35  */
36 
37 static const struct inode_operations ceph_symlink_iops;
38 static const struct inode_operations ceph_encrypted_symlink_iops;
39 
40 static void ceph_inode_work(struct work_struct *work);
41 
42 /*
43  * find or create an inode, given the ceph ino number
44  */
45 static int ceph_set_ino_cb(struct inode *inode, void *data)
46 {
47 	struct ceph_inode_info *ci = ceph_inode(inode);
48 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
49 
50 	ci->i_vino = *(struct ceph_vino *)data;
51 	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
52 	inode_set_iversion_raw(inode, 0);
53 	percpu_counter_inc(&mdsc->metric.total_inodes);
54 
55 	return 0;
56 }
57 
58 /**
59  * ceph_new_inode - allocate a new inode in advance of an expected create
60  * @dir: parent directory for new inode
61  * @dentry: dentry that may eventually point to new inode
62  * @mode: mode of new inode
63  * @as_ctx: pointer to inherited security context
64  *
65  * Allocate a new inode in advance of an operation to create a new inode.
66  * This allocates the inode and sets up the acl_sec_ctx with appropriate
67  * info for the new inode.
68  *
69  * Returns a pointer to the new inode or an ERR_PTR.
70  */
71 struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
72 			     umode_t *mode, struct ceph_acl_sec_ctx *as_ctx)
73 {
74 	int err;
75 	struct inode *inode;
76 
77 	inode = new_inode(dir->i_sb);
78 	if (!inode)
79 		return ERR_PTR(-ENOMEM);
80 
81 	inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
82 
83 	if (!S_ISLNK(*mode)) {
84 		err = ceph_pre_init_acls(dir, mode, as_ctx);
85 		if (err < 0)
86 			goto out_err;
87 	}
88 
89 	inode->i_state = 0;
90 	inode->i_mode = *mode;
91 
92 	err = ceph_security_init_secctx(dentry, *mode, as_ctx);
93 	if (err < 0)
94 		goto out_err;
95 
96 	/*
97 	 * We'll skip setting the fscrypt context for snapshots, leaving
98 	 * that for handle_reply().
99 	 */
100 	if (ceph_snap(dir) != CEPH_SNAPDIR) {
101 		err = ceph_fscrypt_prepare_context(dir, inode, as_ctx);
102 		if (err)
103 			goto out_err;
104 	}
105 
106 	return inode;
107 out_err:
108 	iput(inode);
109 	return ERR_PTR(err);
110 }
111 
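/*
 * Editor's sketch (not part of inode.c): the typical create-path usage of
 * ceph_new_inode(), loosely modeled on callers such as ceph_mknod() in
 * dir.c.  The MDS request itself and its error handling are elided.
 */
static int __maybe_unused new_inode_usage_sketch(struct inode *dir,
						 struct dentry *dentry,
						 umode_t mode)
{
	struct ceph_acl_sec_ctx as_ctx = {};
	struct inode *inode;

	inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* ...build the MDS request, attach the prepared security context
	 * with ceph_as_ctx_to_req(), and send it; on success the reply
	 * path (ceph_fill_trace()) populates 'inode'... */

	iput(inode);			/* drop the advance reference */
	ceph_release_acl_sec_ctx(&as_ctx);
	return 0;
}
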
112 void ceph_as_ctx_to_req(struct ceph_mds_request *req,
113 			struct ceph_acl_sec_ctx *as_ctx)
114 {
115 	if (as_ctx->pagelist) {
116 		req->r_pagelist = as_ctx->pagelist;
117 		as_ctx->pagelist = NULL;
118 	}
119 	ceph_fscrypt_as_ctx_to_req(req, as_ctx);
120 }
121 
122 /**
123  * ceph_get_inode - find or create/hash a new inode
124  * @sb: superblock to search and allocate in
125  * @vino: vino to search for
126  * @newino: optional new inode to insert if one isn't found (may be NULL)
127  *
128  * Search for or insert a new inode into the hash for the given vino, and
129  * return a reference to it. If newino is non-NULL, its reference is consumed.
130  */
131 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
132 			     struct inode *newino)
133 {
134 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
135 	struct ceph_client *cl = mdsc->fsc->client;
136 	struct inode *inode;
137 
138 	if (ceph_vino_is_reserved(vino))
139 		return ERR_PTR(-EREMOTEIO);
140 
141 	if (newino) {
142 		inode = inode_insert5(newino, (unsigned long)vino.ino,
143 				      ceph_ino_compare, ceph_set_ino_cb, &vino);
144 		if (inode != newino)
145 			iput(newino);
146 	} else {
147 		inode = iget5_locked(sb, (unsigned long)vino.ino,
148 				     ceph_ino_compare, ceph_set_ino_cb, &vino);
149 	}
150 
151 	if (!inode) {
152 		doutc(cl, "no inode found for %llx.%llx\n", vino.ino, vino.snap);
153 		return ERR_PTR(-ENOMEM);
154 	}
155 
156 	doutc(cl, "on %llx=%llx.%llx got %p new %d\n",
157 	      ceph_present_inode(inode), ceph_vinop(inode), inode,
158 	      !!(inode->i_state & I_NEW));
159 	return inode;
160 }
161 
162 /*
163  * get/construct snapdir inode for a given directory
164  */
165 struct inode *ceph_get_snapdir(struct inode *parent)
166 {
167 	struct ceph_client *cl = ceph_inode_to_client(parent);
168 	struct ceph_vino vino = {
169 		.ino = ceph_ino(parent),
170 		.snap = CEPH_SNAPDIR,
171 	};
172 	struct inode *inode = ceph_get_inode(parent->i_sb, vino, NULL);
173 	struct ceph_inode_info *ci = ceph_inode(inode);
174 	int ret = -ENOTDIR;
175 
176 	if (IS_ERR(inode))
177 		return inode;
178 
179 	if (!S_ISDIR(parent->i_mode)) {
180 		pr_warn_once_client(cl, "bad snapdir parent type (mode=0%o)\n",
181 				    parent->i_mode);
182 		goto err;
183 	}
184 
185 	if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
186 		pr_warn_once_client(cl, "bad snapdir inode type (mode=0%o)\n",
187 				    inode->i_mode);
188 		goto err;
189 	}
190 
191 	inode->i_mode = parent->i_mode;
192 	inode->i_uid = parent->i_uid;
193 	inode->i_gid = parent->i_gid;
194 	inode_set_mtime_to_ts(inode, inode_get_mtime(parent));
195 	inode_set_ctime_to_ts(inode, inode_get_ctime(parent));
196 	inode_set_atime_to_ts(inode, inode_get_atime(parent));
197 	ci->i_rbytes = 0;
198 	ci->i_btime = ceph_inode(parent)->i_btime;
199 
200 #ifdef CONFIG_FS_ENCRYPTION
201 	/* if encrypted, just borrow fscrypt_auth from parent */
202 	if (IS_ENCRYPTED(parent)) {
203 		struct ceph_inode_info *pci = ceph_inode(parent);
204 
205 		ci->fscrypt_auth = kmemdup(pci->fscrypt_auth,
206 					   pci->fscrypt_auth_len,
207 					   GFP_KERNEL);
208 		if (ci->fscrypt_auth) {
209 			inode->i_flags |= S_ENCRYPTED;
210 			ci->fscrypt_auth_len = pci->fscrypt_auth_len;
211 		} else {
212 			doutc(cl, "Failed to alloc snapdir fscrypt_auth\n");
213 			ret = -ENOMEM;
214 			goto err;
215 		}
216 	}
217 #endif
218 	if (inode->i_state & I_NEW) {
219 		inode->i_op = &ceph_snapdir_iops;
220 		inode->i_fop = &ceph_snapdir_fops;
221 		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
222 		unlock_new_inode(inode);
223 	}
224 
225 	return inode;
226 err:
227 	if ((inode->i_state & I_NEW))
228 		discard_new_inode(inode);
229 	else
230 		iput(inode);
231 	return ERR_PTR(ret);
232 }
233 
234 const struct inode_operations ceph_file_iops = {
235 	.permission = ceph_permission,
236 	.setattr = ceph_setattr,
237 	.getattr = ceph_getattr,
238 	.listxattr = ceph_listxattr,
239 	.get_inode_acl = ceph_get_acl,
240 	.set_acl = ceph_set_acl,
241 };
242 
243 
244 /*
245  * We use a 'frag tree' to keep track of the MDS's directory fragments
246  * for a given inode (usually there is just a single fragment).  We
247  * need to know when a child frag is delegated to a new MDS, or when
248  * it is flagged as replicated, so we can direct our requests
249  * accordingly.
250  */
251 
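/*
 * Editor's sketch (not part of inode.c): how the 32-bit frag encoding
 * behaves.  A frag packs a bit count (upper 8 bits) and a value (lower
 * 24 bits); it "contains" a hash value when the top ceph_frag_bits()
 * bits of the 24-bit hash space match.  Helpers are assumed from
 * <linux/ceph/ceph_frag.h>.
 */
static void __maybe_unused frag_encoding_sketch(void)
{
	u32 root = ceph_frag_make(0, 0);	/* covers the whole hash space */
	/* after a 1-bit split, child 1 covers the upper half */
	u32 child = ceph_frag_make_child(root, 1, 1);

	WARN_ON(!ceph_frag_contains_value(root, 0x123456));
	WARN_ON(ceph_frag_bits(child) != 1);
	WARN_ON(!ceph_frag_contains_value(child, 0xc00000));
	WARN_ON(ceph_frag_contains_value(child, 0x400000));
}
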
252 /*
253  * find/create a frag in the tree
254  */
255 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
256 						    u32 f)
257 {
258 	struct inode *inode = &ci->netfs.inode;
259 	struct ceph_client *cl = ceph_inode_to_client(inode);
260 	struct rb_node **p;
261 	struct rb_node *parent = NULL;
262 	struct ceph_inode_frag *frag;
263 	int c;
264 
265 	p = &ci->i_fragtree.rb_node;
266 	while (*p) {
267 		parent = *p;
268 		frag = rb_entry(parent, struct ceph_inode_frag, node);
269 		c = ceph_frag_compare(f, frag->frag);
270 		if (c < 0)
271 			p = &(*p)->rb_left;
272 		else if (c > 0)
273 			p = &(*p)->rb_right;
274 		else
275 			return frag;
276 	}
277 
278 	frag = kmalloc(sizeof(*frag), GFP_NOFS);
279 	if (!frag)
280 		return ERR_PTR(-ENOMEM);
281 
282 	frag->frag = f;
283 	frag->split_by = 0;
284 	frag->mds = -1;
285 	frag->ndist = 0;
286 
287 	rb_link_node(&frag->node, parent, p);
288 	rb_insert_color(&frag->node, &ci->i_fragtree);
289 
290 	doutc(cl, "added %p %llx.%llx frag %x\n", inode, ceph_vinop(inode), f);
291 	return frag;
292 }
293 
294 /*
295  * find a specific frag @f
296  */
297 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
298 {
299 	struct rb_node *n = ci->i_fragtree.rb_node;
300 
301 	while (n) {
302 		struct ceph_inode_frag *frag =
303 			rb_entry(n, struct ceph_inode_frag, node);
304 		int c = ceph_frag_compare(f, frag->frag);
305 		if (c < 0)
306 			n = n->rb_left;
307 		else if (c > 0)
308 			n = n->rb_right;
309 		else
310 			return frag;
311 	}
312 	return NULL;
313 }
314 
315 /*
316  * Choose frag containing the given value @v.  If @pfrag is
317  * specified, copy the frag delegation info to the caller if
318  * it is present.
319  */
320 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
321 			      struct ceph_inode_frag *pfrag, int *found)
322 {
323 	struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
324 	u32 t = ceph_frag_make(0, 0);
325 	struct ceph_inode_frag *frag;
326 	unsigned nway, i;
327 	u32 n;
328 
329 	if (found)
330 		*found = 0;
331 
332 	while (1) {
333 		WARN_ON(!ceph_frag_contains_value(t, v));
334 		frag = __ceph_find_frag(ci, t);
335 		if (!frag)
336 			break; /* t is a leaf */
337 		if (frag->split_by == 0) {
338 			if (pfrag)
339 				memcpy(pfrag, frag, sizeof(*pfrag));
340 			if (found)
341 				*found = 1;
342 			break;
343 		}
344 
345 		/* choose child */
346 		nway = 1 << frag->split_by;
347 		doutc(cl, "frag(%x) %x splits by %d (%d ways)\n", v, t,
348 		      frag->split_by, nway);
349 		for (i = 0; i < nway; i++) {
350 			n = ceph_frag_make_child(t, frag->split_by, i);
351 			if (ceph_frag_contains_value(n, v)) {
352 				t = n;
353 				break;
354 			}
355 		}
356 		BUG_ON(i == nway);
357 	}
358 	doutc(cl, "frag(%x) = %x\n", v, t);
359 
360 	return t;
361 }
362 
363 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
364 		     struct ceph_inode_frag *pfrag, int *found)
365 {
366 	u32 ret;
367 	mutex_lock(&ci->i_fragtree_mutex);
368 	ret = __ceph_choose_frag(ci, v, pfrag, found);
369 	mutex_unlock(&ci->i_fragtree_mutex);
370 	return ret;
371 }
372 
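/*
 * Editor's sketch (not part of inode.c): picking the dirfrag that covers
 * a dentry name, loosely following callers in mds_client.c.  Assumes the
 * directory's hash type from its dir layout and ceph_str_hash() from
 * <linux/ceph/ceph_hash.h>.
 */
static u32 __maybe_unused choose_frag_for_name_sketch(struct ceph_inode_info *ci,
						      const char *name, int len)
{
	u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, name, len);
	struct ceph_inode_frag frag;
	int found;

	/* returns the leaf frag containing 'hash'; 'frag'/'found' report
	 * any delegation info stored for it */
	return ceph_choose_frag(ci, hash, &frag, &found);
}
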
373 /*
374  * Process dirfrag (delegation) info from the mds.  Include leaf
375  * fragment in tree ONLY if ndist > 0.  Otherwise, only
376  * branches/splits are included in i_fragtree.
377  */
378 static int ceph_fill_dirfrag(struct inode *inode,
379 			     struct ceph_mds_reply_dirfrag *dirinfo)
380 {
381 	struct ceph_inode_info *ci = ceph_inode(inode);
382 	struct ceph_client *cl = ceph_inode_to_client(inode);
383 	struct ceph_inode_frag *frag;
384 	u32 id = le32_to_cpu(dirinfo->frag);
385 	int mds = le32_to_cpu(dirinfo->auth);
386 	int ndist = le32_to_cpu(dirinfo->ndist);
387 	int diri_auth = -1;
388 	int i;
389 	int err = 0;
390 
391 	spin_lock(&ci->i_ceph_lock);
392 	if (ci->i_auth_cap)
393 		diri_auth = ci->i_auth_cap->mds;
394 	spin_unlock(&ci->i_ceph_lock);
395 
396 	if (mds == -1) /* CDIR_AUTH_PARENT */
397 		mds = diri_auth;
398 
399 	mutex_lock(&ci->i_fragtree_mutex);
400 	if (ndist == 0 && mds == diri_auth) {
401 		/* no delegation info needed. */
402 		frag = __ceph_find_frag(ci, id);
403 		if (!frag)
404 			goto out;
405 		if (frag->split_by == 0) {
406 			/* tree leaf, remove */
407 			doutc(cl, "removed %p %llx.%llx frag %x (no ref)\n",
408 			      inode, ceph_vinop(inode), id);
409 			rb_erase(&frag->node, &ci->i_fragtree);
410 			kfree(frag);
411 		} else {
412 			/* tree branch, keep and clear */
413 			doutc(cl, "cleared %p %llx.%llx frag %x referral\n",
414 			      inode, ceph_vinop(inode), id);
415 			frag->mds = -1;
416 			frag->ndist = 0;
417 		}
418 		goto out;
419 	}
420 
421 
422 	/* find/add this frag to store mds delegation info */
423 	frag = __get_or_create_frag(ci, id);
424 	if (IS_ERR(frag)) {
425 		/* this is not the end of the world; we can continue
426 		   with bad/inaccurate delegation info */
427 		pr_err_client(cl, "ENOMEM on mds ref %p %llx.%llx fg %x\n",
428 			      inode, ceph_vinop(inode),
429 			      le32_to_cpu(dirinfo->frag));
430 		err = -ENOMEM;
431 		goto out;
432 	}
433 
434 	frag->mds = mds;
435 	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
436 	for (i = 0; i < frag->ndist; i++)
437 		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
438 	doutc(cl, "%p %llx.%llx frag %x ndist=%d\n", inode,
439 	      ceph_vinop(inode), frag->frag, frag->ndist);
440 
441 out:
442 	mutex_unlock(&ci->i_fragtree_mutex);
443 	return err;
444 }
445 
446 static int frag_tree_split_cmp(const void *l, const void *r)
447 {
448 	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
449 	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
450 	return ceph_frag_compare(le32_to_cpu(ls->frag),
451 				 le32_to_cpu(rs->frag));
452 }
453 
454 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
455 {
456 	if (!frag)
457 		return f == ceph_frag_make(0, 0);
458 	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
459 		return false;
460 	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
461 }
462 
463 static int ceph_fill_fragtree(struct inode *inode,
464 			      struct ceph_frag_tree_head *fragtree,
465 			      struct ceph_mds_reply_dirfrag *dirinfo)
466 {
467 	struct ceph_client *cl = ceph_inode_to_client(inode);
468 	struct ceph_inode_info *ci = ceph_inode(inode);
469 	struct ceph_inode_frag *frag, *prev_frag = NULL;
470 	struct rb_node *rb_node;
471 	unsigned i, split_by, nsplits;
472 	u32 id;
473 	bool update = false;
474 
475 	mutex_lock(&ci->i_fragtree_mutex);
476 	nsplits = le32_to_cpu(fragtree->nsplits);
477 	if (nsplits != ci->i_fragtree_nsplits) {
478 		update = true;
479 	} else if (nsplits) {
480 		i = get_random_u32_below(nsplits);
481 		id = le32_to_cpu(fragtree->splits[i].frag);
482 		if (!__ceph_find_frag(ci, id))
483 			update = true;
484 	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
485 		rb_node = rb_first(&ci->i_fragtree);
486 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
487 		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
488 			update = true;
489 	}
490 	if (!update && dirinfo) {
491 		id = le32_to_cpu(dirinfo->frag);
492 		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
493 			update = true;
494 	}
495 	if (!update)
496 		goto out_unlock;
497 
498 	if (nsplits > 1) {
499 		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
500 		     frag_tree_split_cmp, NULL);
501 	}
502 
503 	doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
504 	rb_node = rb_first(&ci->i_fragtree);
505 	for (i = 0; i < nsplits; i++) {
506 		id = le32_to_cpu(fragtree->splits[i].frag);
507 		split_by = le32_to_cpu(fragtree->splits[i].by);
508 		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
509 			pr_err_client(cl, "%p %llx.%llx invalid split %d/%u, "
510 			       "frag %x split by %d\n", inode,
511 			       ceph_vinop(inode), i, nsplits, id, split_by);
512 			continue;
513 		}
514 		frag = NULL;
515 		while (rb_node) {
516 			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
517 			if (ceph_frag_compare(frag->frag, id) >= 0) {
518 				if (frag->frag != id)
519 					frag = NULL;
520 				else
521 					rb_node = rb_next(rb_node);
522 				break;
523 			}
524 			rb_node = rb_next(rb_node);
525 			/* delete stale split/leaf node */
526 			if (frag->split_by > 0 ||
527 			    !is_frag_child(frag->frag, prev_frag)) {
528 				rb_erase(&frag->node, &ci->i_fragtree);
529 				if (frag->split_by > 0)
530 					ci->i_fragtree_nsplits--;
531 				kfree(frag);
532 			}
533 			frag = NULL;
534 		}
535 		if (!frag) {
536 			frag = __get_or_create_frag(ci, id);
537 			if (IS_ERR(frag))
538 				continue;
539 		}
540 		if (frag->split_by == 0)
541 			ci->i_fragtree_nsplits++;
542 		frag->split_by = split_by;
543 		doutc(cl, " frag %x split by %d\n", frag->frag, frag->split_by);
544 		prev_frag = frag;
545 	}
546 	while (rb_node) {
547 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
548 		rb_node = rb_next(rb_node);
549 		/* delete stale split/leaf node */
550 		if (frag->split_by > 0 ||
551 		    !is_frag_child(frag->frag, prev_frag)) {
552 			rb_erase(&frag->node, &ci->i_fragtree);
553 			if (frag->split_by > 0)
554 				ci->i_fragtree_nsplits--;
555 			kfree(frag);
556 		}
557 	}
558 out_unlock:
559 	mutex_unlock(&ci->i_fragtree_mutex);
560 	return 0;
561 }
562 
563 /*
564  * initialize a newly allocated inode.
565  */
566 struct inode *ceph_alloc_inode(struct super_block *sb)
567 {
568 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
569 	struct ceph_inode_info *ci;
570 	int i;
571 
572 	ci = alloc_inode_sb(sb, ceph_inode_cachep, GFP_NOFS);
573 	if (!ci)
574 		return NULL;
575 
576 	doutc(fsc->client, "%p\n", &ci->netfs.inode);
577 
578 	/* Set parameters for the netfs library */
579 	netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
580 
581 	spin_lock_init(&ci->i_ceph_lock);
582 
583 	ci->i_version = 0;
584 	ci->i_inline_version = 0;
585 	ci->i_time_warp_seq = 0;
586 	ci->i_ceph_flags = 0;
587 	atomic64_set(&ci->i_ordered_count, 1);
588 	atomic64_set(&ci->i_release_count, 1);
589 	atomic64_set(&ci->i_complete_seq[0], 0);
590 	atomic64_set(&ci->i_complete_seq[1], 0);
591 	ci->i_symlink = NULL;
592 
593 	ci->i_max_bytes = 0;
594 	ci->i_max_files = 0;
595 
596 	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
597 	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
598 	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
599 
600 	ci->i_fragtree = RB_ROOT;
601 	mutex_init(&ci->i_fragtree_mutex);
602 
603 	ci->i_xattrs.blob = NULL;
604 	ci->i_xattrs.prealloc_blob = NULL;
605 	ci->i_xattrs.dirty = false;
606 	ci->i_xattrs.index = RB_ROOT;
607 	ci->i_xattrs.count = 0;
608 	ci->i_xattrs.names_size = 0;
609 	ci->i_xattrs.vals_size = 0;
610 	ci->i_xattrs.version = 0;
611 	ci->i_xattrs.index_version = 0;
612 
613 	ci->i_caps = RB_ROOT;
614 	ci->i_auth_cap = NULL;
615 	ci->i_dirty_caps = 0;
616 	ci->i_flushing_caps = 0;
617 	INIT_LIST_HEAD(&ci->i_dirty_item);
618 	INIT_LIST_HEAD(&ci->i_flushing_item);
619 	ci->i_prealloc_cap_flush = NULL;
620 	INIT_LIST_HEAD(&ci->i_cap_flush_list);
621 	init_waitqueue_head(&ci->i_cap_wq);
622 	ci->i_hold_caps_max = 0;
623 	INIT_LIST_HEAD(&ci->i_cap_delay_list);
624 	INIT_LIST_HEAD(&ci->i_cap_snaps);
625 	ci->i_head_snapc = NULL;
626 	ci->i_snap_caps = 0;
627 
628 	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
629 	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
630 		ci->i_nr_by_mode[i] = 0;
631 
632 	mutex_init(&ci->i_truncate_mutex);
633 	ci->i_truncate_seq = 0;
634 	ci->i_truncate_size = 0;
635 	ci->i_truncate_pending = 0;
636 	ci->i_truncate_pagecache_size = 0;
637 
638 	ci->i_max_size = 0;
639 	ci->i_reported_size = 0;
640 	ci->i_wanted_max_size = 0;
641 	ci->i_requested_max_size = 0;
642 
643 	ci->i_pin_ref = 0;
644 	ci->i_rd_ref = 0;
645 	ci->i_rdcache_ref = 0;
646 	ci->i_wr_ref = 0;
647 	ci->i_wb_ref = 0;
648 	ci->i_fx_ref = 0;
649 	ci->i_wrbuffer_ref = 0;
650 	ci->i_wrbuffer_ref_head = 0;
651 	atomic_set(&ci->i_filelock_ref, 0);
652 	atomic_set(&ci->i_shared_gen, 1);
653 	ci->i_rdcache_gen = 0;
654 	ci->i_rdcache_revoking = 0;
655 
656 	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
657 	INIT_LIST_HEAD(&ci->i_unsafe_iops);
658 	spin_lock_init(&ci->i_unsafe_lock);
659 
660 	ci->i_snap_realm = NULL;
661 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
662 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
663 
664 	INIT_WORK(&ci->i_work, ceph_inode_work);
665 	ci->i_work_mask = 0;
666 	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
667 #ifdef CONFIG_FS_ENCRYPTION
668 	ci->fscrypt_auth = NULL;
669 	ci->fscrypt_auth_len = 0;
670 #endif
671 	return &ci->netfs.inode;
672 }
673 
674 void ceph_free_inode(struct inode *inode)
675 {
676 	struct ceph_inode_info *ci = ceph_inode(inode);
677 
678 	kfree(ci->i_symlink);
679 #ifdef CONFIG_FS_ENCRYPTION
680 	kfree(ci->fscrypt_auth);
681 #endif
682 	fscrypt_free_inode(inode);
683 	kmem_cache_free(ceph_inode_cachep, ci);
684 }
685 
686 void ceph_evict_inode(struct inode *inode)
687 {
688 	struct ceph_inode_info *ci = ceph_inode(inode);
689 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
690 	struct ceph_client *cl = ceph_inode_to_client(inode);
691 	struct ceph_inode_frag *frag;
692 	struct rb_node *n;
693 
694 	doutc(cl, "%p ino %llx.%llx\n", inode, ceph_vinop(inode));
695 
696 	percpu_counter_dec(&mdsc->metric.total_inodes);
697 
698 	netfs_wait_for_outstanding_io(inode);
699 	truncate_inode_pages_final(&inode->i_data);
700 	if (inode->i_state & I_PINNING_NETFS_WB)
701 		ceph_fscache_unuse_cookie(inode, true);
702 	clear_inode(inode);
703 
704 	ceph_fscache_unregister_inode_cookie(ci);
705 	fscrypt_put_encryption_info(inode);
706 
707 	__ceph_remove_caps(ci);
708 
709 	if (__ceph_has_quota(ci, QUOTA_GET_ANY))
710 		ceph_adjust_quota_realms_count(inode, false);
711 
712 	/*
713 	 * we may still have a snap_realm reference if there are stray
714 	 * caps in i_snap_caps.
715 	 */
716 	if (ci->i_snap_realm) {
717 		if (ceph_snap(inode) == CEPH_NOSNAP) {
718 			doutc(cl, " dropping residual ref to snap realm %p\n",
719 			      ci->i_snap_realm);
720 			ceph_change_snap_realm(inode, NULL);
721 		} else {
722 			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
723 			ci->i_snap_realm = NULL;
724 		}
725 	}
726 
727 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
728 		frag = rb_entry(n, struct ceph_inode_frag, node);
729 		rb_erase(n, &ci->i_fragtree);
730 		kfree(frag);
731 	}
732 	ci->i_fragtree_nsplits = 0;
733 
734 	__ceph_destroy_xattrs(ci);
735 	if (ci->i_xattrs.blob)
736 		ceph_buffer_put(ci->i_xattrs.blob);
737 	if (ci->i_xattrs.prealloc_blob)
738 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
739 
740 	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
741 	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
742 }
743 
744 static inline blkcnt_t calc_inode_blocks(u64 size)
745 {
746 	return (size + (1<<9) - 1) >> 9;
747 }
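
/*
 * Editor's note: i_blocks is counted in 512-byte sectors, rounded up;
 * e.g. calc_inode_blocks(1) == 1 and calc_inode_blocks(4096) == 8.
 */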
748 
749 /*
750  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
751  * careful because either the client or MDS may have more up to date
752  * info, depending on which capabilities are held, and whether
753  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
754  * and size are monotonically increasing, except when utimes() or
755  * truncate() increments the corresponding _seq values.)
756  */
757 int ceph_fill_file_size(struct inode *inode, int issued,
758 			u32 truncate_seq, u64 truncate_size, u64 size)
759 {
760 	struct ceph_client *cl = ceph_inode_to_client(inode);
761 	struct ceph_inode_info *ci = ceph_inode(inode);
762 	int queue_trunc = 0;
763 	loff_t isize = i_size_read(inode);
764 
765 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
766 	    (truncate_seq == ci->i_truncate_seq && size > isize)) {
767 		doutc(cl, "size %lld -> %llu\n", isize, size);
768 		if (size > 0 && S_ISDIR(inode->i_mode)) {
769 			pr_err_client(cl, "non-zero size for directory\n");
770 			size = 0;
771 		}
772 		i_size_write(inode, size);
773 		inode->i_blocks = calc_inode_blocks(size);
774 		/*
775 		 * If we're expanding, then we should be able to just update
776 		 * the existing cookie.
777 		 */
778 		if (size > isize)
779 			ceph_fscache_update(inode);
780 		ci->i_reported_size = size;
781 		if (truncate_seq != ci->i_truncate_seq) {
782 			doutc(cl, "truncate_seq %u -> %u\n",
783 			      ci->i_truncate_seq, truncate_seq);
784 			ci->i_truncate_seq = truncate_seq;
785 
786 			/* the MDS should have revoked these caps */
787 			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
788 					       CEPH_CAP_FILE_LAZYIO));
789 			/*
790 			 * If we hold relevant caps, or in the case where we're
791 			 * not the only client referencing this file and we
792 			 * don't hold those caps, then we need to check whether
793 			 * the file is either opened or mmaped
794 			 */
795 			if ((issued & (CEPH_CAP_FILE_CACHE|
796 				       CEPH_CAP_FILE_BUFFER)) ||
797 			    mapping_mapped(inode->i_mapping) ||
798 			    __ceph_is_file_opened(ci)) {
799 				ci->i_truncate_pending++;
800 				queue_trunc = 1;
801 			}
802 		}
803 	}
804 
805 	/*
806 	 * It's possible that the new sizes from two consecutive
807 	 * truncations land in the same fscrypt last block; the
808 	 * corresponding page cache still needs to be truncated in
809 	 * that case, hence the >= comparison below.
810 	 */
811 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0) {
812 		doutc(cl, "truncate_size %lld -> %llu, encrypted %d\n",
813 		      ci->i_truncate_size, truncate_size,
814 		      !!IS_ENCRYPTED(inode));
815 
816 		ci->i_truncate_size = truncate_size;
817 
818 		if (IS_ENCRYPTED(inode)) {
819 			doutc(cl, "truncate_pagecache_size %lld -> %llu\n",
820 			      ci->i_truncate_pagecache_size, size);
821 			ci->i_truncate_pagecache_size = size;
822 		} else {
823 			ci->i_truncate_pagecache_size = truncate_size;
824 		}
825 	}
826 	return queue_trunc;
827 }
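
/*
 * Editor's note on the ceph_seq_cmp() calls above: the comparison is
 * wraparound-safe (effectively a signed 32-bit difference), so e.g.
 * ceph_seq_cmp(1, 0xffffffffU) > 0 even though 1 < 0xffffffff when
 * compared as plain integers.
 */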
828 
829 void ceph_fill_file_time(struct inode *inode, int issued,
830 			 u64 time_warp_seq, struct timespec64 *ctime,
831 			 struct timespec64 *mtime, struct timespec64 *atime)
832 {
833 	struct ceph_client *cl = ceph_inode_to_client(inode);
834 	struct ceph_inode_info *ci = ceph_inode(inode);
835 	struct timespec64 ictime = inode_get_ctime(inode);
836 	int warn = 0;
837 
838 	if (issued & (CEPH_CAP_FILE_EXCL|
839 		      CEPH_CAP_FILE_WR|
840 		      CEPH_CAP_FILE_BUFFER|
841 		      CEPH_CAP_AUTH_EXCL|
842 		      CEPH_CAP_XATTR_EXCL)) {
843 		if (ci->i_version == 0 ||
844 		    timespec64_compare(ctime, &ictime) > 0) {
845 			doutc(cl, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
846 			     ictime.tv_sec, ictime.tv_nsec,
847 			     ctime->tv_sec, ctime->tv_nsec);
848 			inode_set_ctime_to_ts(inode, *ctime);
849 		}
850 		if (ci->i_version == 0 ||
851 		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
852 			/* the MDS did a utimes() */
853 			doutc(cl, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n",
854 			     inode_get_mtime_sec(inode),
855 			     inode_get_mtime_nsec(inode),
856 			     mtime->tv_sec, mtime->tv_nsec,
857 			     ci->i_time_warp_seq, (int)time_warp_seq);
858 
859 			inode_set_mtime_to_ts(inode, *mtime);
860 			inode_set_atime_to_ts(inode, *atime);
861 			ci->i_time_warp_seq = time_warp_seq;
862 		} else if (time_warp_seq == ci->i_time_warp_seq) {
863 			struct timespec64	ts;
864 
865 			/* nobody did utimes(); take the max */
866 			ts = inode_get_mtime(inode);
867 			if (timespec64_compare(mtime, &ts) > 0) {
868 				doutc(cl, "mtime %lld.%09ld -> %lld.%09ld inc\n",
869 				     ts.tv_sec, ts.tv_nsec,
870 				     mtime->tv_sec, mtime->tv_nsec);
871 				inode_set_mtime_to_ts(inode, *mtime);
872 			}
873 			ts = inode_get_atime(inode);
874 			if (timespec64_compare(atime, &ts) > 0) {
875 				doutc(cl, "atime %lld.%09ld -> %lld.%09ld inc\n",
876 				     ts.tv_sec, ts.tv_nsec,
877 				     atime->tv_sec, atime->tv_nsec);
878 				inode_set_atime_to_ts(inode, *atime);
879 			}
880 		} else if (issued & CEPH_CAP_FILE_EXCL) {
881 			/* we did a utimes(); ignore mds values */
882 		} else {
883 			warn = 1;
884 		}
885 	} else {
886 		/* we have no write|excl caps; whatever the MDS says is true */
887 		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
888 			inode_set_ctime_to_ts(inode, *ctime);
889 			inode_set_mtime_to_ts(inode, *mtime);
890 			inode_set_atime_to_ts(inode, *atime);
891 			ci->i_time_warp_seq = time_warp_seq;
892 		} else {
893 			warn = 1;
894 		}
895 	}
896 	if (warn) /* time_warp_seq shouldn't go backwards */
897 		doutc(cl, "%p mds time_warp_seq %llu < %u\n", inode,
898 		      time_warp_seq, ci->i_time_warp_seq);
899 }
900 
901 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
902 static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
903 				    const char *encsym,
904 				    int enclen, u8 **decsym)
905 {
906 	struct ceph_client *cl = mdsc->fsc->client;
907 	int declen;
908 	u8 *sym;
909 
910 	sym = kmalloc(enclen + 1, GFP_NOFS);
911 	if (!sym)
912 		return -ENOMEM;
913 
914 	declen = ceph_base64_decode(encsym, enclen, sym);
915 	if (declen < 0) {
916 		pr_err_client(cl,
917 			"can't decode symlink (%d). Content: %.*s\n",
918 			declen, enclen, encsym);
919 		kfree(sym);
920 		return -EIO;
921 	}
922 	sym[declen] = '\0';	/* NUL-terminate the decoded target */
923 	*decsym = sym;
924 	return declen;
925 }
926 #else
927 static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
928 				    const char *encsym,
929 				    int symlen, u8 **decsym)
930 {
931 	return -EOPNOTSUPP;
932 }
933 #endif
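
/*
 * Editor's note: base64 decoding shrinks the data (roughly 3 output
 * bytes per 4 input characters), so the kmalloc(enclen + 1) above is
 * always large enough for the decoded target plus its terminating NUL.
 */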
934 
935 /*
936  * Populate an inode based on info from mds.  May be called on new or
937  * existing inodes.
938  */
939 int ceph_fill_inode(struct inode *inode, struct page *locked_page,
940 		    struct ceph_mds_reply_info_in *iinfo,
941 		    struct ceph_mds_reply_dirfrag *dirinfo,
942 		    struct ceph_mds_session *session, int cap_fmode,
943 		    struct ceph_cap_reservation *caps_reservation)
944 {
945 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
946 	struct ceph_client *cl = mdsc->fsc->client;
947 	struct ceph_mds_reply_inode *info = iinfo->in;
948 	struct ceph_inode_info *ci = ceph_inode(inode);
949 	int issued, new_issued, info_caps;
950 	struct timespec64 mtime, atime, ctime;
951 	struct ceph_buffer *xattr_blob = NULL;
952 	struct ceph_buffer *old_blob = NULL;
953 	struct ceph_string *pool_ns = NULL;
954 	struct ceph_cap *new_cap = NULL;
955 	int err = 0;
956 	bool wake = false;
957 	bool queue_trunc = false;
958 	bool new_version = false;
959 	bool fill_inline = false;
960 	umode_t mode = le32_to_cpu(info->mode);
961 	dev_t rdev = le32_to_cpu(info->rdev);
962 
963 	lockdep_assert_held(&mdsc->snap_rwsem);
964 
965 	doutc(cl, "%p ino %llx.%llx v %llu had %llu\n", inode, ceph_vinop(inode),
966 	      le64_to_cpu(info->version), ci->i_version);
967 
968 	/* Once I_NEW is cleared, we can't change type or dev numbers */
969 	if (inode->i_state & I_NEW) {
970 		inode->i_mode = mode;
971 	} else {
972 		if (inode_wrong_type(inode, mode)) {
973 			pr_warn_once_client(cl,
974 				"inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
975 				ceph_vinop(inode), inode->i_mode, mode);
976 			return -ESTALE;
977 		}
978 
979 		if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
980 			pr_warn_once_client(cl,
981 				"dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
982 				ceph_vinop(inode), MAJOR(inode->i_rdev),
983 				MINOR(inode->i_rdev), MAJOR(rdev),
984 				MINOR(rdev));
985 			return -ESTALE;
986 		}
987 	}
988 
989 	info_caps = le32_to_cpu(info->cap.caps);
990 
991 	/* prealloc new cap struct */
992 	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
993 		new_cap = ceph_get_cap(mdsc, caps_reservation);
994 		if (!new_cap)
995 			return -ENOMEM;
996 	}
997 
998 	/*
999 	 * prealloc xattr data, if it looks like we'll need it.  only
1000 	 * if len > 4 (meaning there are actually xattrs; the first 4
1001 	 * bytes are the xattr count).
1002 	 */
1003 	if (iinfo->xattr_len > 4) {
1004 		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
1005 		if (!xattr_blob)
1006 			pr_err_client(cl, "ENOMEM xattr blob %d bytes\n",
1007 				      iinfo->xattr_len);
1008 	}
1009 
1010 	if (iinfo->pool_ns_len > 0)
1011 		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
1012 						     iinfo->pool_ns_len);
1013 
1014 	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
1015 		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
1016 
1017 	spin_lock(&ci->i_ceph_lock);
1018 
1019 	/*
1020 	 * The provided version is odd if the inode value is projected,
1021 	 * and even if it is stable.  Skip the update if we have newer
1022 	 * stable info (ours >= theirs, e.g. due to racing mds replies), unless
1023 	 * we are getting projected (unstable) info (in which case the
1024 	 * version is odd, and we want ours>theirs).
1025 	 *   us   them
1026 	 *   2    2     skip
1027 	 *   3    2     skip
1028 	 *   3    3     update
1029 	 */
1030 	if (ci->i_version == 0 ||
1031 	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
1032 	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
1033 		new_version = true;
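
	/*
	 * Editor's example of the table above: if ours == 3 (projected)
	 * and theirs == 3, then (ci->i_version & ~1) == 2 < 3 and the
	 * reply is applied; if theirs == 2, 2 > 2 is false and the
	 * (older stable) reply is skipped.
	 */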
1034 
1035 	/* Update change_attribute */
1036 	inode_set_max_iversion_raw(inode, iinfo->change_attr);
1037 
1038 	__ceph_caps_issued(ci, &issued);
1039 	issued |= __ceph_caps_dirty(ci);
1040 	new_issued = ~issued & info_caps;
1041 
1042 	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
1043 
1044 #ifdef CONFIG_FS_ENCRYPTION
1045 	if (iinfo->fscrypt_auth_len &&
1046 	    ((inode->i_state & I_NEW) || (ci->fscrypt_auth_len == 0))) {
1047 		kfree(ci->fscrypt_auth);
1048 		ci->fscrypt_auth_len = iinfo->fscrypt_auth_len;
1049 		ci->fscrypt_auth = iinfo->fscrypt_auth;
1050 		iinfo->fscrypt_auth = NULL;
1051 		iinfo->fscrypt_auth_len = 0;
1052 		inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
1053 	}
1054 #endif
1055 
1056 	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
1057 	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
1058 		inode->i_mode = mode;
1059 		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
1060 		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
1061 		doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode,
1062 		      ceph_vinop(inode), inode->i_mode,
1063 		      from_kuid(&init_user_ns, inode->i_uid),
1064 		      from_kgid(&init_user_ns, inode->i_gid));
1065 		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
1066 		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
1067 	}
1068 
1069 	/* directories have fl_stripe_unit set to zero */
1070 	if (IS_ENCRYPTED(inode))
1071 		inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
1072 	else if (le32_to_cpu(info->layout.fl_stripe_unit))
1073 		inode->i_blkbits =
1074 			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
1075 	else
1076 		inode->i_blkbits = CEPH_BLOCK_SHIFT;
1077 
1078 	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
1079 	    (issued & CEPH_CAP_LINK_EXCL) == 0)
1080 		set_nlink(inode, le32_to_cpu(info->nlink));
1081 
1082 	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
1083 		/* be careful with mtime, atime, size */
1084 		ceph_decode_timespec64(&atime, &info->atime);
1085 		ceph_decode_timespec64(&mtime, &info->mtime);
1086 		ceph_decode_timespec64(&ctime, &info->ctime);
1087 		ceph_fill_file_time(inode, issued,
1088 				le32_to_cpu(info->time_warp_seq),
1089 				&ctime, &mtime, &atime);
1090 	}
1091 
1092 	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
1093 		ci->i_files = le64_to_cpu(info->files);
1094 		ci->i_subdirs = le64_to_cpu(info->subdirs);
1095 	}
1096 
1097 	if (new_version ||
1098 	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
1099 		u64 size = le64_to_cpu(info->size);
1100 		s64 old_pool = ci->i_layout.pool_id;
1101 		struct ceph_string *old_ns;
1102 
1103 		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
1104 		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
1105 					lockdep_is_held(&ci->i_ceph_lock));
1106 		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
1107 
1108 		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
1109 			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
1110 
1111 		pool_ns = old_ns;
1112 
1113 		if (IS_ENCRYPTED(inode) && size &&
1114 		    iinfo->fscrypt_file_len == sizeof(__le64)) {
1115 			u64 fsize = __le64_to_cpu(*(__le64 *)iinfo->fscrypt_file);
1116 
1117 			if (size == round_up(fsize, CEPH_FSCRYPT_BLOCK_SIZE)) {
1118 				size = fsize;
1119 			} else {
1120 				pr_warn_client(cl,
1121 					"fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
1122 					size, fsize);
1123 			}
1124 		}
1125 
1126 		queue_trunc = ceph_fill_file_size(inode, issued,
1127 					le32_to_cpu(info->truncate_seq),
1128 					le64_to_cpu(info->truncate_size),
1129 					size);
1130 		/* only update max_size on auth cap */
1131 		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
1132 		    ci->i_max_size != le64_to_cpu(info->max_size)) {
1133 			doutc(cl, "max_size %lld -> %llu\n",
1134 			    ci->i_max_size, le64_to_cpu(info->max_size));
1135 			ci->i_max_size = le64_to_cpu(info->max_size);
1136 		}
1137 	}
1138 
1139 	/* layout and rstat are not tracked by capabilities; update them
1140 	 * if the inode info is from the auth MDS */
1141 	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
1142 		if (S_ISDIR(inode->i_mode)) {
1143 			ci->i_dir_layout = iinfo->dir_layout;
1144 			ci->i_rbytes = le64_to_cpu(info->rbytes);
1145 			ci->i_rfiles = le64_to_cpu(info->rfiles);
1146 			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
1147 			ci->i_dir_pin = iinfo->dir_pin;
1148 			ci->i_rsnaps = iinfo->rsnaps;
1149 			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
1150 		}
1151 	}
1152 
1153 	/* xattrs */
1154 	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
1155 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
1156 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
1157 		if (ci->i_xattrs.blob)
1158 			old_blob = ci->i_xattrs.blob;
1159 		ci->i_xattrs.blob = xattr_blob;
1160 		if (xattr_blob)
1161 			memcpy(ci->i_xattrs.blob->vec.iov_base,
1162 			       iinfo->xattr_data, iinfo->xattr_len);
1163 		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
1164 		ceph_forget_all_cached_acls(inode);
1165 		ceph_security_invalidate_secctx(inode);
1166 		xattr_blob = NULL;
1167 	}
1168 
1169 	/* finally update i_version */
1170 	if (le64_to_cpu(info->version) > ci->i_version)
1171 		ci->i_version = le64_to_cpu(info->version);
1172 
1173 	inode->i_mapping->a_ops = &ceph_aops;
1174 
1175 	switch (inode->i_mode & S_IFMT) {
1176 	case S_IFIFO:
1177 	case S_IFBLK:
1178 	case S_IFCHR:
1179 	case S_IFSOCK:
1180 		inode->i_blkbits = PAGE_SHIFT;
1181 		init_special_inode(inode, inode->i_mode, rdev);
1182 		inode->i_op = &ceph_file_iops;
1183 		break;
1184 	case S_IFREG:
1185 		inode->i_op = &ceph_file_iops;
1186 		inode->i_fop = &ceph_file_fops;
1187 		break;
1188 	case S_IFLNK:
1189 		if (!ci->i_symlink) {
1190 			u32 symlen = iinfo->symlink_len;
1191 			char *sym;
1192 
1193 			spin_unlock(&ci->i_ceph_lock);
1194 
1195 			if (IS_ENCRYPTED(inode)) {
1196 				if (symlen != i_size_read(inode))
1197 					pr_err_client(cl,
1198 						"%p %llx.%llx BAD symlink size %lld\n",
1199 						inode, ceph_vinop(inode),
1200 						i_size_read(inode));
1201 
1202 				err = decode_encrypted_symlink(mdsc, iinfo->symlink,
1203 							       symlen, (u8 **)&sym);
1204 				if (err < 0) {
1205 					pr_err_client(cl,
1206 						"decoding encrypted symlink failed: %d\n",
1207 						err);
1208 					goto out;
1209 				}
1210 				symlen = err;
1211 				i_size_write(inode, symlen);
1212 				inode->i_blocks = calc_inode_blocks(symlen);
1213 			} else {
1214 				if (symlen != i_size_read(inode)) {
1215 					pr_err_client(cl,
1216 						"%p %llx.%llx BAD symlink size %lld\n",
1217 						inode, ceph_vinop(inode),
1218 						i_size_read(inode));
1219 					i_size_write(inode, symlen);
1220 					inode->i_blocks = calc_inode_blocks(symlen);
1221 				}
1222 
1223 				err = -ENOMEM;
1224 				sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
1225 				if (!sym)
1226 					goto out;
1227 			}
1228 
1229 			spin_lock(&ci->i_ceph_lock);
1230 			if (!ci->i_symlink)
1231 				ci->i_symlink = sym;
1232 			else
1233 				kfree(sym); /* lost a race */
1234 		}
1235 
1236 		if (IS_ENCRYPTED(inode)) {
1237 			/*
1238 			 * Encrypted symlinks need to be decrypted before we can
1239 			 * cache their targets in i_link. Don't touch it here.
1240 			 */
1241 			inode->i_op = &ceph_encrypted_symlink_iops;
1242 		} else {
1243 			inode->i_link = ci->i_symlink;
1244 			inode->i_op = &ceph_symlink_iops;
1245 		}
1246 		break;
1247 	case S_IFDIR:
1248 		inode->i_op = &ceph_dir_iops;
1249 		inode->i_fop = &ceph_dir_fops;
1250 		break;
1251 	default:
1252 		pr_err_client(cl, "%p %llx.%llx BAD mode 0%o\n", inode,
1253 			      ceph_vinop(inode), inode->i_mode);
1254 	}
1255 
1256 	/* were we issued a capability? */
1257 	if (info_caps) {
1258 		if (ceph_snap(inode) == CEPH_NOSNAP) {
1259 			ceph_add_cap(inode, session,
1260 				     le64_to_cpu(info->cap.cap_id),
1261 				     info_caps,
1262 				     le32_to_cpu(info->cap.wanted),
1263 				     le32_to_cpu(info->cap.seq),
1264 				     le32_to_cpu(info->cap.mseq),
1265 				     le64_to_cpu(info->cap.realm),
1266 				     info->cap.flags, &new_cap);
1267 
1268 			/* set dir completion flag? */
1269 			if (S_ISDIR(inode->i_mode) &&
1270 			    ci->i_files == 0 && ci->i_subdirs == 0 &&
1271 			    (info_caps & CEPH_CAP_FILE_SHARED) &&
1272 			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
1273 			    !__ceph_dir_is_complete(ci)) {
1274 				doutc(cl, " marking %p complete (empty)\n",
1275 				      inode);
1276 				i_size_write(inode, 0);
1277 				__ceph_dir_set_complete(ci,
1278 					atomic64_read(&ci->i_release_count),
1279 					atomic64_read(&ci->i_ordered_count));
1280 			}
1281 
1282 			wake = true;
1283 		} else {
1284 			doutc(cl, " %p got snap_caps %s\n", inode,
1285 			      ceph_cap_string(info_caps));
1286 			ci->i_snap_caps |= info_caps;
1287 		}
1288 	}
1289 
1290 	if (iinfo->inline_version > 0 &&
1291 	    iinfo->inline_version >= ci->i_inline_version) {
1292 		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1293 		ci->i_inline_version = iinfo->inline_version;
1294 		if (ceph_has_inline_data(ci) &&
1295 		    (locked_page || (info_caps & cache_caps)))
1296 			fill_inline = true;
1297 	}
1298 
1299 	if (cap_fmode >= 0) {
1300 		if (!info_caps)
1301 			pr_warn_client(cl, "mds issued no caps on %llx.%llx\n",
1302 				       ceph_vinop(inode));
1303 		__ceph_touch_fmode(ci, mdsc, cap_fmode);
1304 	}
1305 
1306 	spin_unlock(&ci->i_ceph_lock);
1307 
1308 	ceph_fscache_register_inode_cookie(inode);
1309 
1310 	if (fill_inline)
1311 		ceph_fill_inline_data(inode, locked_page,
1312 				      iinfo->inline_data, iinfo->inline_len);
1313 
1314 	if (wake)
1315 		wake_up_all(&ci->i_cap_wq);
1316 
1317 	/* queue truncate if we saw i_size decrease */
1318 	if (queue_trunc)
1319 		ceph_queue_vmtruncate(inode);
1320 
1321 	/* populate frag tree */
1322 	if (S_ISDIR(inode->i_mode))
1323 		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
1324 
1325 	/* update delegation info? */
1326 	if (dirinfo)
1327 		ceph_fill_dirfrag(inode, dirinfo);
1328 
1329 	err = 0;
1330 out:
1331 	if (new_cap)
1332 		ceph_put_cap(mdsc, new_cap);
1333 	ceph_buffer_put(old_blob);
1334 	ceph_buffer_put(xattr_blob);
1335 	ceph_put_string(pool_ns);
1336 	return err;
1337 }
1338 
1339 /*
1340  * caller should hold session s_mutex and dentry->d_lock.
1341  */
1342 static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
1343 				  struct ceph_mds_reply_lease *lease,
1344 				  struct ceph_mds_session *session,
1345 				  unsigned long from_time,
1346 				  struct ceph_mds_session **old_lease_session)
1347 {
1348 	struct ceph_client *cl = ceph_inode_to_client(dir);
1349 	struct ceph_dentry_info *di = ceph_dentry(dentry);
1350 	unsigned mask = le16_to_cpu(lease->mask);
1351 	long unsigned duration = le32_to_cpu(lease->duration_ms);
1352 	long unsigned ttl = from_time + (duration * HZ) / 1000;
1353 	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
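	/*
	 * Editor's example: with HZ == 1000 and a 30000 ms lease granted
	 * at 'from_time', ttl is from_time + 30000 jiffies and renewal
	 * becomes due at the halfway point, from_time + 15000 jiffies.
	 */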
1354 
1355 	doutc(cl, "%p duration %lu ms ttl %lu\n", dentry, duration, ttl);
1356 
1357 	/* only track leases on regular dentries */
1358 	if (ceph_snap(dir) != CEPH_NOSNAP)
1359 		return;
1360 
1361 	if (mask & CEPH_LEASE_PRIMARY_LINK)
1362 		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
1363 	else
1364 		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1365 
1366 	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
1367 	if (!(mask & CEPH_LEASE_VALID)) {
1368 		__ceph_dentry_dir_lease_touch(di);
1369 		return;
1370 	}
1371 
1372 	if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
1373 	    time_before(ttl, di->time))
1374 		return;  /* we already have a newer lease. */
1375 
1376 	if (di->lease_session && di->lease_session != session) {
1377 		*old_lease_session = di->lease_session;
1378 		di->lease_session = NULL;
1379 	}
1380 
1381 	if (!di->lease_session)
1382 		di->lease_session = ceph_get_mds_session(session);
1383 	di->lease_gen = atomic_read(&session->s_cap_gen);
1384 	di->lease_seq = le32_to_cpu(lease->seq);
1385 	di->lease_renew_after = half_ttl;
1386 	di->lease_renew_from = 0;
1387 	di->time = ttl;
1388 
1389 	__ceph_dentry_lease_touch(di);
1390 }
1391 
1392 static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
1393 					struct ceph_mds_reply_lease *lease,
1394 					struct ceph_mds_session *session,
1395 					unsigned long from_time)
1396 {
1397 	struct ceph_mds_session *old_lease_session = NULL;
1398 	spin_lock(&dentry->d_lock);
1399 	__update_dentry_lease(dir, dentry, lease, session, from_time,
1400 			      &old_lease_session);
1401 	spin_unlock(&dentry->d_lock);
1402 	ceph_put_mds_session(old_lease_session);
1403 }
1404 
1405 /*
1406  * update dentry lease without having parent inode locked
1407  */
1408 static void update_dentry_lease_careful(struct dentry *dentry,
1409 					struct ceph_mds_reply_lease *lease,
1410 					struct ceph_mds_session *session,
1411 					unsigned long from_time,
1412 					char *dname, u32 dname_len,
1413 					struct ceph_vino *pdvino,
1414 					struct ceph_vino *ptvino)
1415 
1416 {
1417 	struct inode *dir;
1418 	struct ceph_mds_session *old_lease_session = NULL;
1419 
1420 	spin_lock(&dentry->d_lock);
1421 	/* make sure dentry's name matches target */
1422 	if (dentry->d_name.len != dname_len ||
1423 	    memcmp(dentry->d_name.name, dname, dname_len))
1424 		goto out_unlock;
1425 
1426 	dir = d_inode(dentry->d_parent);
1427 	/* make sure parent matches dvino */
1428 	if (!ceph_ino_compare(dir, pdvino))
1429 		goto out_unlock;
1430 
1431 	/* make sure dentry's inode matches target. NULL ptvino means that
1432 	 * we expect a negative dentry */
1433 	if (ptvino) {
1434 		if (d_really_is_negative(dentry))
1435 			goto out_unlock;
1436 		if (!ceph_ino_compare(d_inode(dentry), ptvino))
1437 			goto out_unlock;
1438 	} else {
1439 		if (d_really_is_positive(dentry))
1440 			goto out_unlock;
1441 	}
1442 
1443 	__update_dentry_lease(dir, dentry, lease, session,
1444 			      from_time, &old_lease_session);
1445 out_unlock:
1446 	spin_unlock(&dentry->d_lock);
1447 	ceph_put_mds_session(old_lease_session);
1448 }
1449 
1450 /*
1451  * splice a dentry to an inode.
1452  * caller must hold directory i_rwsem for this to be safe.
1453  */
1454 static int splice_dentry(struct dentry **pdn, struct inode *in)
1455 {
1456 	struct ceph_client *cl = ceph_inode_to_client(in);
1457 	struct dentry *dn = *pdn;
1458 	struct dentry *realdn;
1459 
1460 	BUG_ON(d_inode(dn));
1461 
1462 	if (S_ISDIR(in->i_mode)) {
1463 		/* If inode is directory, d_splice_alias() below will remove
1464 		 * 'realdn' from its origin parent. We need to ensure that
1465 		 * origin parent's readdir cache will not reference 'realdn'.
1466 		 */
1467 		realdn = d_find_any_alias(in);
1468 		if (realdn) {
1469 			struct ceph_dentry_info *di = ceph_dentry(realdn);
1470 			spin_lock(&realdn->d_lock);
1471 
1472 			realdn->d_op->d_prune(realdn);
1473 
1474 			di->time = jiffies;
1475 			di->lease_shared_gen = 0;
1476 			di->offset = 0;
1477 
1478 			spin_unlock(&realdn->d_lock);
1479 			dput(realdn);
1480 		}
1481 	}
1482 
1483 	/* dn must be unhashed */
1484 	if (!d_unhashed(dn))
1485 		d_drop(dn);
1486 	realdn = d_splice_alias(in, dn);
1487 	if (IS_ERR(realdn)) {
1488 		pr_err_client(cl, "error %ld %p inode %p ino %llx.%llx\n",
1489 			      PTR_ERR(realdn), dn, in, ceph_vinop(in));
1490 		return PTR_ERR(realdn);
1491 	}
1492 
1493 	if (realdn) {
1494 		doutc(cl, "dn %p (%d) spliced with %p (%d) inode %p ino %llx.%llx\n",
1495 		      dn, d_count(dn), realdn, d_count(realdn),
1496 		      d_inode(realdn), ceph_vinop(d_inode(realdn)));
1497 		dput(dn);
1498 		*pdn = realdn;
1499 	} else {
1500 		BUG_ON(!ceph_dentry(dn));
1501 		doutc(cl, "dn %p attached to %p ino %llx.%llx\n", dn,
1502 		      d_inode(dn), ceph_vinop(d_inode(dn)));
1503 	}
1504 	return 0;
1505 }
1506 
1507 /*
1508  * Incorporate results into the local cache.  This is either just
1509  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1510  * after a lookup).
1511  *
1512  * A reply may contain:
1513  *         a directory inode along with a dentry, and/or
1514  *         a target inode.
1515  *
1516  * Called with snap_rwsem (read).
1517  */
1518 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
1519 {
1520 	struct ceph_mds_session *session = req->r_session;
1521 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1522 	struct inode *in = NULL;
1523 	struct ceph_vino tvino, dvino;
1524 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
1525 	struct ceph_client *cl = fsc->client;
1526 	int err = 0;
1527 
1528 	doutc(cl, "%p is_dentry %d is_target %d\n", req,
1529 	      rinfo->head->is_dentry, rinfo->head->is_target);
1530 
1531 	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1532 		doutc(cl, "reply is empty!\n");
1533 		if (rinfo->head->result == 0 && req->r_parent)
1534 			ceph_invalidate_dir_request(req);
1535 		return 0;
1536 	}
1537 
1538 	if (rinfo->head->is_dentry) {
1539 		struct inode *dir = req->r_parent;
1540 
1541 		if (dir) {
1542 			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
1543 					      rinfo->dirfrag, session, -1,
1544 					      &req->r_caps_reservation);
1545 			if (err < 0)
1546 				goto done;
1547 		} else {
1548 			WARN_ON_ONCE(1);
1549 		}
1550 
1551 		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
1552 		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1553 		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1554 			bool is_nokey = false;
1555 			struct qstr dname;
1556 			struct dentry *dn, *parent;
1557 			struct fscrypt_str oname = FSTR_INIT(NULL, 0);
1558 			struct ceph_fname fname = { .dir	= dir,
1559 						    .name	= rinfo->dname,
1560 						    .ctext	= rinfo->altname,
1561 						    .name_len	= rinfo->dname_len,
1562 						    .ctext_len	= rinfo->altname_len };
1563 
1564 			BUG_ON(!rinfo->head->is_target);
1565 			BUG_ON(req->r_dentry);
1566 
1567 			parent = d_find_any_alias(dir);
1568 			BUG_ON(!parent);
1569 
1570 			err = ceph_fname_alloc_buffer(dir, &oname);
1571 			if (err < 0) {
1572 				dput(parent);
1573 				goto done;
1574 			}
1575 
1576 			err = ceph_fname_to_usr(&fname, NULL, &oname, &is_nokey);
1577 			if (err < 0) {
1578 				dput(parent);
1579 				ceph_fname_free_buffer(dir, &oname);
1580 				goto done;
1581 			}
1582 			dname.name = oname.name;
1583 			dname.len = oname.len;
1584 			dname.hash = full_name_hash(parent, dname.name, dname.len);
1585 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1586 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1587 retry_lookup:
1588 			dn = d_lookup(parent, &dname);
1589 			doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
1590 			      parent, dname.len, dname.name, dn);
1591 
1592 			if (!dn) {
1593 				dn = d_alloc(parent, &dname);
1594 				doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
1595 				      dname.len, dname.name, dn);
1596 				if (!dn) {
1597 					dput(parent);
1598 					ceph_fname_free_buffer(dir, &oname);
1599 					err = -ENOMEM;
1600 					goto done;
1601 				}
1602 				if (is_nokey) {
1603 					spin_lock(&dn->d_lock);
1604 					dn->d_flags |= DCACHE_NOKEY_NAME;
1605 					spin_unlock(&dn->d_lock);
1606 				}
1607 				err = 0;
1608 			} else if (d_really_is_positive(dn) &&
1609 				   (ceph_ino(d_inode(dn)) != tvino.ino ||
1610 				    ceph_snap(d_inode(dn)) != tvino.snap)) {
1611 				doutc(cl, " dn %p points to wrong inode %p\n",
1612 				      dn, d_inode(dn));
1613 				ceph_dir_clear_ordered(dir);
1614 				d_delete(dn);
1615 				dput(dn);
1616 				goto retry_lookup;
1617 			}
1618 			ceph_fname_free_buffer(dir, &oname);
1619 
1620 			req->r_dentry = dn;
1621 			dput(parent);
1622 		}
1623 	}
1624 
1625 	if (rinfo->head->is_target) {
1626 		/* Should be filled in by handle_reply */
1627 		BUG_ON(!req->r_target_inode);
1628 
1629 		in = req->r_target_inode;
1630 		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
1631 				NULL, session,
1632 				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1633 				 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
1634 				 rinfo->head->result == 0) ?  req->r_fmode : -1,
1635 				&req->r_caps_reservation);
1636 		if (err < 0) {
1637 			pr_err_client(cl, "badness %p %llx.%llx\n", in,
1638 				      ceph_vinop(in));
1639 			req->r_target_inode = NULL;
1640 			if (in->i_state & I_NEW)
1641 				discard_new_inode(in);
1642 			else
1643 				iput(in);
1644 			goto done;
1645 		}
1646 		if (in->i_state & I_NEW)
1647 			unlock_new_inode(in);
1648 	}
1649 
1650 	/*
1651 	 * ignore null lease/binding on snapdir ENOENT, or else we
1652 	 * will have trouble splicing in the virtual snapdir later
1653 	 */
1654 	if (rinfo->head->is_dentry &&
1655             !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1656 	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1657 	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1658 					       fsc->mount_options->snapdir_name,
1659 					       req->r_dentry->d_name.len))) {
1660 		/*
1661 		 * lookup link rename   : null -> possibly existing inode
1662 		 * mknod symlink mkdir  : null -> new inode
1663 		 * unlink               : linked -> null
1664 		 */
1665 		struct inode *dir = req->r_parent;
1666 		struct dentry *dn = req->r_dentry;
1667 		bool have_dir_cap, have_lease;
1668 
1669 		BUG_ON(!dn);
1670 		BUG_ON(!dir);
1671 		BUG_ON(d_inode(dn->d_parent) != dir);
1672 
1673 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1674 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1675 
1676 		BUG_ON(ceph_ino(dir) != dvino.ino);
1677 		BUG_ON(ceph_snap(dir) != dvino.snap);
1678 
1679 		/* do we have a lease on the whole dir? */
1680 		have_dir_cap =
1681 			(le32_to_cpu(rinfo->diri.in->cap.caps) &
1682 			 CEPH_CAP_FILE_SHARED);
1683 
1684 		/* do we have a dn lease? */
1685 		have_lease = have_dir_cap ||
1686 			le32_to_cpu(rinfo->dlease->duration_ms);
1687 		if (!have_lease)
1688 			doutc(cl, "no dentry lease or dir cap\n");
1689 
1690 		/* rename? */
1691 		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1692 			struct inode *olddir = req->r_old_dentry_dir;
1693 			BUG_ON(!olddir);
1694 
1695 			doutc(cl, " src %p '%pd' dst %p '%pd'\n",
1696 			      req->r_old_dentry, req->r_old_dentry, dn, dn);
1697 			doutc(cl, "doing d_move %p -> %p\n", req->r_old_dentry, dn);
1698 
1699 			/* d_move screws up sibling dentries' offsets */
1700 			ceph_dir_clear_ordered(dir);
1701 			ceph_dir_clear_ordered(olddir);
1702 
1703 			d_move(req->r_old_dentry, dn);
1704 			doutc(cl, " src %p '%pd' dst %p '%pd'\n",
1705 			      req->r_old_dentry, req->r_old_dentry, dn, dn);
1706 
1707 			/* ensure target dentry is invalidated, despite
1708 			   rehashing bug in vfs_rename_dir */
1709 			ceph_invalidate_dentry_lease(dn);
1710 
1711 			doutc(cl, "dn %p gets new offset %lld\n",
1712 			      req->r_old_dentry,
1713 			      ceph_dentry(req->r_old_dentry)->offset);
1714 
1715 			/* swap r_dentry and r_old_dentry in case
1716 			 * splice_dentry() gets called later. This is safe
1717 			 * because no other place will use them */
1718 			req->r_dentry = req->r_old_dentry;
1719 			req->r_old_dentry = dn;
1720 			dn = req->r_dentry;
1721 		}
1722 
1723 		/* null dentry? */
1724 		if (!rinfo->head->is_target) {
1725 			doutc(cl, "null dentry\n");
1726 			if (d_really_is_positive(dn)) {
1727 				doutc(cl, "d_delete %p\n", dn);
1728 				ceph_dir_clear_ordered(dir);
1729 				d_delete(dn);
1730 			} else if (have_lease) {
1731 				if (d_unhashed(dn))
1732 					d_add(dn, NULL);
1733 			}
1734 
1735 			if (!d_unhashed(dn) && have_lease)
1736 				update_dentry_lease(dir, dn,
1737 						    rinfo->dlease, session,
1738 						    req->r_request_started);
1739 			goto done;
1740 		}
1741 
1742 		/* attach proper inode */
1743 		if (d_really_is_negative(dn)) {
1744 			ceph_dir_clear_ordered(dir);
1745 			ihold(in);
1746 			err = splice_dentry(&req->r_dentry, in);
1747 			if (err < 0)
1748 				goto done;
1749 			dn = req->r_dentry;  /* may have spliced */
1750 		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1751 			doutc(cl, " %p links to %p %llx.%llx, not %llx.%llx\n",
1752 			      dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1753 			      ceph_vinop(in));
1754 			d_invalidate(dn);
1755 			have_lease = false;
1756 		}
1757 
1758 		if (have_lease) {
1759 			update_dentry_lease(dir, dn,
1760 					    rinfo->dlease, session,
1761 					    req->r_request_started);
1762 		}
1763 		doutc(cl, " final dn %p\n", dn);
1764 	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1765 		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
1766 	           test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1767 		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1768 		struct inode *dir = req->r_parent;
1769 
1770 		/* fill out a snapdir LOOKUPSNAP dentry */
1771 		BUG_ON(!dir);
1772 		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1773 		BUG_ON(!req->r_dentry);
1774 		doutc(cl, " linking snapped dir %p to dn %p\n", in,
1775 		      req->r_dentry);
1776 		ceph_dir_clear_ordered(dir);
1777 		ihold(in);
1778 		err = splice_dentry(&req->r_dentry, in);
1779 		if (err < 0)
1780 			goto done;
1781 	} else if (rinfo->head->is_dentry && req->r_dentry) {
1782 		/* parent inode is not locked, be careful */
1783 		struct ceph_vino *ptvino = NULL;
1784 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1785 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1786 		if (rinfo->head->is_target) {
1787 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1788 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1789 			ptvino = &tvino;
1790 		}
1791 		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
1792 					    session, req->r_request_started,
1793 					    rinfo->dname, rinfo->dname_len,
1794 					    &dvino, ptvino);
1795 	}
1796 done:
1797 	doutc(cl, "done err=%d\n", err);
1798 	return err;
1799 }
1800 
1801 /*
1802  * Prepopulate our cache with readdir results, leases, etc.
1803  */
1804 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1805 					   struct ceph_mds_session *session)
1806 {
1807 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1808 	struct ceph_client *cl = session->s_mdsc->fsc->client;
1809 	int i, err = 0;
1810 
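	/*
	 * This path runs when the request was aborted: the dcache can no
	 * longer be trusted to match what the MDS returned, so update
	 * only the inode cache and leave all dentries alone.
	 */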
1811 	for (i = 0; i < rinfo->dir_nr; i++) {
1812 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1813 		struct ceph_vino vino;
1814 		struct inode *in;
1815 		int rc;
1816 
1817 		vino.ino = le64_to_cpu(rde->inode.in->ino);
1818 		vino.snap = le64_to_cpu(rde->inode.in->snapid);
1819 
1820 		in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL);
1821 		if (IS_ERR(in)) {
1822 			err = PTR_ERR(in);
1823 			doutc(cl, "badness got %d\n", err);
1824 			continue;
1825 		}
1826 		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1827 				     -1, &req->r_caps_reservation);
1828 		if (rc < 0) {
1829 			pr_err_client(cl, "inode badness on %p got %d\n", in,
1830 				      rc);
1831 			err = rc;
1832 			if (in->i_state & I_NEW) {
1833 				ihold(in);
1834 				discard_new_inode(in);
1835 			}
1836 		} else if (in->i_state & I_NEW) {
1837 			unlock_new_inode(in);
1838 		}
1839 
1840 		iput(in);
1841 	}
1842 
1843 	return err;
1844 }
1845 
1846 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1847 {
1848 	if (ctl->page) {
1849 		kunmap(ctl->page);
1850 		put_page(ctl->page);
1851 		ctl->page = NULL;
1852 	}
1853 }
1854 
1855 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1856 			      struct ceph_readdir_cache_control *ctl,
1857 			      struct ceph_mds_request *req)
1858 {
1859 	struct ceph_client *cl = ceph_inode_to_client(dir);
1860 	struct ceph_inode_info *ci = ceph_inode(dir);
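	/*
	 * Each cache page holds PAGE_SIZE / sizeof(struct dentry *)
	 * dentry pointers (512 with 4K pages and 8-byte pointers), so
	 * pgoff selects the page and idx the slot within that page.
	 */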
1861 	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1862 	unsigned idx = ctl->index % nsize;
1863 	pgoff_t pgoff = ctl->index / nsize;
1864 
1865 	if (!ctl->page || pgoff != ctl->page->index) {
1866 		ceph_readdir_cache_release(ctl);
1867 		if (idx == 0)
1868 			ctl->page = grab_cache_page(&dir->i_data, pgoff);
1869 		else
1870 			ctl->page = find_lock_page(&dir->i_data, pgoff);
1871 		if (!ctl->page) {
1872 			ctl->index = -1;
1873 			return idx == 0 ? -ENOMEM : 0;
1874 		}
1875 		/* reading/filling the cache are serialized by
1876 		 * i_rwsem, no need to use page lock */
1877 		unlock_page(ctl->page);
1878 		ctl->dentries = kmap(ctl->page);
1879 		if (idx == 0)
1880 			memset(ctl->dentries, 0, PAGE_SIZE);
1881 	}
1882 
1883 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1884 	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1885 		doutc(cl, "dn %p idx %d\n", dn, ctl->index);
1886 		ctl->dentries[idx] = dn;
1887 		ctl->index++;
1888 	} else {
1889 		doutc(cl, "disable readdir cache\n");
1890 		ctl->index = -1;
1891 	}
1892 	return 0;
1893 }
1894 
1895 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1896 			     struct ceph_mds_session *session)
1897 {
1898 	struct dentry *parent = req->r_dentry;
1899 	struct inode *inode = d_inode(parent);
1900 	struct ceph_inode_info *ci = ceph_inode(inode);
1901 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1902 	struct ceph_client *cl = session->s_mdsc->fsc->client;
1903 	struct qstr dname;
1904 	struct dentry *dn;
1905 	struct inode *in;
1906 	int err = 0, skipped = 0, ret, i;
1907 	u32 frag = le32_to_cpu(req->r_args.readdir.frag);
1908 	u32 last_hash = 0;
1909 	u32 fpos_offset;
1910 	struct ceph_readdir_cache_control cache_ctl = {};
1911 
1912 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1913 		return readdir_prepopulate_inodes_only(req, session);
1914 
1915 	if (rinfo->hash_order) {
1916 		if (req->r_path2) {
1917 			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1918 						  req->r_path2,
1919 						  strlen(req->r_path2));
1920 			last_hash = ceph_frag_value(last_hash);
1921 		} else if (rinfo->offset_hash) {
1922 			/* mds understands offset_hash */
1923 			WARN_ON_ONCE(req->r_readdir_offset != 2);
1924 			last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
1925 		}
1926 	}
1927 
1928 	if (rinfo->dir_dir &&
1929 	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1930 		doutc(cl, "got new frag %x -> %x\n", frag,
1931 			    le32_to_cpu(rinfo->dir_dir->frag));
1932 		frag = le32_to_cpu(rinfo->dir_dir->frag);
1933 		if (!rinfo->hash_order)
1934 			req->r_readdir_offset = 2;
1935 	}
1936 
1937 	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1938 		doutc(cl, "%d items under SNAPDIR dn %p\n",
1939 		      rinfo->dir_nr, parent);
1940 	} else {
1941 		doutc(cl, "%d items under dn %p\n", rinfo->dir_nr, parent);
1942 		if (rinfo->dir_dir)
1943 			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1944 
1945 		if (ceph_frag_is_leftmost(frag) &&
1946 		    req->r_readdir_offset == 2 &&
1947 		    !(rinfo->hash_order && last_hash)) {
1948 			/* note dir version at start of readdir so we can
1949 			 * tell if any dentries get dropped */
1950 			req->r_dir_release_cnt =
1951 				atomic64_read(&ci->i_release_count);
1952 			req->r_dir_ordered_cnt =
1953 				atomic64_read(&ci->i_ordered_count);
1954 			req->r_readdir_cache_idx = 0;
1955 		}
1956 	}
1957 
1958 	cache_ctl.index = req->r_readdir_cache_idx;
1959 	fpos_offset = req->r_readdir_offset;
1960 
1961 	/* FIXME: release caps/leases if error occurs */
1962 	for (i = 0; i < rinfo->dir_nr; i++) {
1963 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1964 		struct ceph_vino tvino;
1965 
1966 		dname.name = rde->name;
1967 		dname.len = rde->name_len;
1968 		dname.hash = full_name_hash(parent, dname.name, dname.len);
1969 
1970 		tvino.ino = le64_to_cpu(rde->inode.in->ino);
1971 		tvino.snap = le64_to_cpu(rde->inode.in->snapid);
1972 
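		/*
		 * Compute this entry's readdir position: in hash order the
		 * fpos encodes (name hash, offset within that hash value),
		 * otherwise (dirfrag, offset within the frag).
		 */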
1973 		if (rinfo->hash_order) {
1974 			u32 hash = ceph_frag_value(rde->raw_hash);
1975 			if (hash != last_hash)
1976 				fpos_offset = 2;
1977 			last_hash = hash;
1978 			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1979 		} else {
1980 			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1981 		}
1982 
1983 retry_lookup:
1984 		dn = d_lookup(parent, &dname);
1985 		doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
1986 		      parent, dname.len, dname.name, dn);
1987 
1988 		if (!dn) {
1989 			dn = d_alloc(parent, &dname);
1990 			doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
1991 			      dname.len, dname.name, dn);
1992 			if (!dn) {
1993 				doutc(cl, "d_alloc badness\n");
1994 				err = -ENOMEM;
1995 				goto out;
1996 			}
1997 			if (rde->is_nokey) {
1998 				spin_lock(&dn->d_lock);
1999 				dn->d_flags |= DCACHE_NOKEY_NAME;
2000 				spin_unlock(&dn->d_lock);
2001 			}
2002 		} else if (d_really_is_positive(dn) &&
2003 			   (ceph_ino(d_inode(dn)) != tvino.ino ||
2004 			    ceph_snap(d_inode(dn)) != tvino.snap)) {
2005 			struct ceph_dentry_info *di = ceph_dentry(dn);
2006 			doutc(cl, " dn %p points to wrong inode %p\n",
2007 			      dn, d_inode(dn));
2008 
2009 			spin_lock(&dn->d_lock);
2010 			if (di->offset > 0 &&
2011 			    di->lease_shared_gen ==
2012 			    atomic_read(&ci->i_shared_gen)) {
2013 				__ceph_dir_clear_ordered(ci);
2014 				di->offset = 0;
2015 			}
2016 			spin_unlock(&dn->d_lock);
2017 
2018 			d_delete(dn);
2019 			dput(dn);
2020 			goto retry_lookup;
2021 		}
2022 
2023 		/* inode */
2024 		if (d_really_is_positive(dn)) {
2025 			in = d_inode(dn);
2026 		} else {
2027 			in = ceph_get_inode(parent->d_sb, tvino, NULL);
2028 			if (IS_ERR(in)) {
2029 				doutc(cl, "new_inode badness\n");
2030 				d_drop(dn);
2031 				dput(dn);
2032 				err = PTR_ERR(in);
2033 				goto out;
2034 			}
2035 		}
2036 
2037 		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
2038 				      -1, &req->r_caps_reservation);
2039 		if (ret < 0) {
2040 			pr_err_client(cl, "badness on %p %llx.%llx\n", in,
2041 				      ceph_vinop(in));
2042 			if (d_really_is_negative(dn)) {
2043 				if (in->i_state & I_NEW) {
2044 					ihold(in);
2045 					discard_new_inode(in);
2046 				}
2047 				iput(in);
2048 			}
2049 			d_drop(dn);
2050 			err = ret;
2051 			goto next_item;
2052 		}
2053 		if (in->i_state & I_NEW)
2054 			unlock_new_inode(in);
2055 
2056 		if (d_really_is_negative(dn)) {
2057 			if (ceph_security_xattr_deadlock(in)) {
2058 				doutc(cl, " skip splicing dn %p to inode %p"
2059 				      " (security xattr deadlock)\n", dn, in);
2060 				iput(in);
2061 				skipped++;
2062 				goto next_item;
2063 			}
2064 
2065 			err = splice_dentry(&dn, in);
2066 			if (err < 0)
2067 				goto next_item;
2068 		}
2069 
2070 		ceph_dentry(dn)->offset = rde->offset;
2071 
2072 		update_dentry_lease(d_inode(parent), dn,
2073 				    rde->lease, req->r_session,
2074 				    req->r_request_started);
2075 
2076 		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
2077 			ret = fill_readdir_cache(d_inode(parent), dn,
2078 						 &cache_ctl, req);
2079 			if (ret < 0)
2080 				err = ret;
2081 		}
2082 next_item:
2083 		dput(dn);
2084 	}
2085 out:
2086 	if (err == 0 && skipped == 0) {
2087 		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
2088 		req->r_readdir_cache_idx = cache_ctl.index;
2089 	}
2090 	ceph_readdir_cache_release(&cache_ctl);
2091 	doutc(cl, "done\n");
2092 	return err;
2093 }
2094 
2095 bool ceph_inode_set_size(struct inode *inode, loff_t size)
2096 {
2097 	struct ceph_client *cl = ceph_inode_to_client(inode);
2098 	struct ceph_inode_info *ci = ceph_inode(inode);
2099 	bool ret;
2100 
2101 	spin_lock(&ci->i_ceph_lock);
2102 	doutc(cl, "set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
2103 	i_size_write(inode, size);
2104 	ceph_fscache_update(inode);
2105 	inode->i_blocks = calc_inode_blocks(size);
2106 
2107 	ret = __ceph_should_report_size(ci);
2108 
2109 	spin_unlock(&ci->i_ceph_lock);
2110 
2111 	return ret;
2112 }
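
/*
 * A minimal usage sketch (not taken from this file): a caller that
 * learns a new size from an OSD reply might do
 *
 *	if (ceph_inode_set_size(inode, new_size))
 *		ceph_check_caps(ceph_inode(inode), 0);
 *
 * i.e. update the local size, then report it to the MDS only when
 * __ceph_should_report_size() said the reported size is stale.  The
 * flags value passed to ceph_check_caps() is an assumption here.
 */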
2113 
2114 void ceph_queue_inode_work(struct inode *inode, int work_bit)
2115 {
2116 	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2117 	struct ceph_client *cl = fsc->client;
2118 	struct ceph_inode_info *ci = ceph_inode(inode);
2119 	set_bit(work_bit, &ci->i_work_mask);
2120 
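	/*
	 * Pin the inode while the work is queued.  If i_work was already
	 * queued, queue_work() returns false and the extra reference is
	 * dropped right away; ceph_inode_work() puts the reference once
	 * it has run.
	 */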
2121 	ihold(inode);
2122 	if (queue_work(fsc->inode_wq, &ci->i_work)) {
2123 		doutc(cl, "%p %llx.%llx mask=%lx\n", inode,
2124 		      ceph_vinop(inode), ci->i_work_mask);
2125 	} else {
2126 		doutc(cl, "%p %llx.%llx already queued, mask=%lx\n",
2127 		      inode, ceph_vinop(inode), ci->i_work_mask);
2128 		iput(inode);
2129 	}
2130 }
2131 
2132 static void ceph_do_invalidate_pages(struct inode *inode)
2133 {
2134 	struct ceph_client *cl = ceph_inode_to_client(inode);
2135 	struct ceph_inode_info *ci = ceph_inode(inode);
2136 	u32 orig_gen;
2137 	int check = 0;
2138 
2139 	ceph_fscache_invalidate(inode, false);
2140 
2141 	mutex_lock(&ci->i_truncate_mutex);
2142 
2143 	if (ceph_inode_is_shutdown(inode)) {
2144 		pr_warn_ratelimited_client(cl,
2145 			"%p %llx.%llx is shut down\n", inode,
2146 			ceph_vinop(inode));
2147 		mapping_set_error(inode->i_mapping, -EIO);
2148 		truncate_pagecache(inode, 0);
2149 		mutex_unlock(&ci->i_truncate_mutex);
2150 		goto out;
2151 	}
2152 
2153 	spin_lock(&ci->i_ceph_lock);
2154 	doutc(cl, "%p %llx.%llx gen %d revoking %d\n", inode,
2155 	      ceph_vinop(inode), ci->i_rdcache_gen, ci->i_rdcache_revoking);
2156 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2157 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
2158 			check = 1;
2159 		spin_unlock(&ci->i_ceph_lock);
2160 		mutex_unlock(&ci->i_truncate_mutex);
2161 		goto out;
2162 	}
2163 	orig_gen = ci->i_rdcache_gen;
2164 	spin_unlock(&ci->i_ceph_lock);
2165 
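	/*
	 * Do the invalidation with all locks dropped, then re-check the
	 * generation: if i_rdcache_gen moved on or a new revoke started
	 * while we were invalidating, the invalidation raced and must
	 * not be counted as complete.
	 */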
2166 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
2167 		pr_err_client(cl, "invalidate_inode_pages2 %llx.%llx failed\n",
2168 			      ceph_vinop(inode));
2169 	}
2170 
2171 	spin_lock(&ci->i_ceph_lock);
2172 	if (orig_gen == ci->i_rdcache_gen &&
2173 	    orig_gen == ci->i_rdcache_revoking) {
2174 		doutc(cl, "%p %llx.%llx gen %d successful\n", inode,
2175 		      ceph_vinop(inode), ci->i_rdcache_gen);
2176 		ci->i_rdcache_revoking--;
2177 		check = 1;
2178 	} else {
2179 		doutc(cl, "%p %llx.%llx gen %d raced, now %d revoking %d\n",
2180 		      inode, ceph_vinop(inode), orig_gen, ci->i_rdcache_gen,
2181 		      ci->i_rdcache_revoking);
2182 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
2183 			check = 1;
2184 	}
2185 	spin_unlock(&ci->i_ceph_lock);
2186 	mutex_unlock(&ci->i_truncate_mutex);
2187 out:
2188 	if (check)
2189 		ceph_check_caps(ci, 0);
2190 }
2191 
2192 /*
2193  * Make sure any pending truncation is applied before doing anything
2194  * that may depend on it.
2195  */
2196 void __ceph_do_pending_vmtruncate(struct inode *inode)
2197 {
2198 	struct ceph_client *cl = ceph_inode_to_client(inode);
2199 	struct ceph_inode_info *ci = ceph_inode(inode);
2200 	u64 to;
2201 	int wrbuffer_refs, finish = 0;
2202 
2203 	mutex_lock(&ci->i_truncate_mutex);
2204 retry:
2205 	spin_lock(&ci->i_ceph_lock);
2206 	if (ci->i_truncate_pending == 0) {
2207 		doutc(cl, "%p %llx.%llx none pending\n", inode,
2208 		      ceph_vinop(inode));
2209 		spin_unlock(&ci->i_ceph_lock);
2210 		mutex_unlock(&ci->i_truncate_mutex);
2211 		return;
2212 	}
2213 
2214 	/*
2215 	 * make sure any dirty snapped pages are flushed before we
2216 	 * possibly truncate them.. so write AND block!
2217 	 */
2218 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
2219 		spin_unlock(&ci->i_ceph_lock);
2220 		doutc(cl, "%p %llx.%llx flushing snaps first\n", inode,
2221 		      ceph_vinop(inode));
2222 		filemap_write_and_wait_range(&inode->i_data, 0,
2223 					     inode->i_sb->s_maxbytes);
2224 		goto retry;
2225 	}
2226 
2227 	/* there should be no reader or writer */
2228 	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
2229 
2230 	to = ci->i_truncate_pagecache_size;
2231 	wrbuffer_refs = ci->i_wrbuffer_ref;
2232 	doutc(cl, "%p %llx.%llx (%d) to %lld\n", inode, ceph_vinop(inode),
2233 	      ci->i_truncate_pending, to);
2234 	spin_unlock(&ci->i_ceph_lock);
2235 
2236 	ceph_fscache_resize(inode, to);
2237 	truncate_pagecache(inode, to);
2238 
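	/*
	 * Re-check under i_ceph_lock: a racing truncate may have changed
	 * i_truncate_pagecache_size while the pagecache was being
	 * truncated above; if so, go around again.
	 */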
2239 	spin_lock(&ci->i_ceph_lock);
2240 	if (to == ci->i_truncate_pagecache_size) {
2241 		ci->i_truncate_pending = 0;
2242 		finish = 1;
2243 	}
2244 	spin_unlock(&ci->i_ceph_lock);
2245 	if (!finish)
2246 		goto retry;
2247 
2248 	mutex_unlock(&ci->i_truncate_mutex);
2249 
2250 	if (wrbuffer_refs == 0)
2251 		ceph_check_caps(ci, 0);
2252 
2253 	wake_up_all(&ci->i_cap_wq);
2254 }
2255 
2256 static void ceph_inode_work(struct work_struct *work)
2257 {
2258 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
2259 						 i_work);
2260 	struct inode *inode = &ci->netfs.inode;
2261 	struct ceph_client *cl = ceph_inode_to_client(inode);
2262 
2263 	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
2264 		doutc(cl, "writeback %p %llx.%llx\n", inode, ceph_vinop(inode));
2265 		filemap_fdatawrite(&inode->i_data);
2266 	}
2267 	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
2268 		ceph_do_invalidate_pages(inode);
2269 
2270 	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
2271 		__ceph_do_pending_vmtruncate(inode);
2272 
2273 	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
2274 		ceph_check_caps(ci, 0);
2275 
2276 	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
2277 		ceph_flush_snaps(ci, NULL);
2278 
2279 	iput(inode);
2280 }
2281 
2282 static const char *ceph_encrypted_get_link(struct dentry *dentry,
2283 					   struct inode *inode,
2284 					   struct delayed_call *done)
2285 {
2286 	struct ceph_inode_info *ci = ceph_inode(inode);
2287 
2288 	if (!dentry)
2289 		return ERR_PTR(-ECHILD);
2290 
2291 	return fscrypt_get_symlink(inode, ci->i_symlink, i_size_read(inode),
2292 				   done);
2293 }
2294 
2295 static int ceph_encrypted_symlink_getattr(struct mnt_idmap *idmap,
2296 					  const struct path *path,
2297 					  struct kstat *stat, u32 request_mask,
2298 					  unsigned int query_flags)
2299 {
2300 	int ret;
2301 
2302 	ret = ceph_getattr(idmap, path, stat, request_mask, query_flags);
2303 	if (ret)
2304 		return ret;
2305 	return fscrypt_symlink_getattr(path, stat);
2306 }
2307 
2308 /*
2309  * symlinks
2310  */
2311 static const struct inode_operations ceph_symlink_iops = {
2312 	.get_link = simple_get_link,
2313 	.setattr = ceph_setattr,
2314 	.getattr = ceph_getattr,
2315 	.listxattr = ceph_listxattr,
2316 };
2317 
2318 static const struct inode_operations ceph_encrypted_symlink_iops = {
2319 	.get_link = ceph_encrypted_get_link,
2320 	.setattr = ceph_setattr,
2321 	.getattr = ceph_encrypted_symlink_getattr,
2322 	.listxattr = ceph_listxattr,
2323 };
2324 
2325 /*
2326  * Transfer the encrypted last block to the MDS and the MDS
2327  * will help update it when truncating to a smaller size.
2328  *
2329  * We don't support a PAGE_SIZE that is smaller than the
2330  * CEPH_FSCRYPT_BLOCK_SIZE.
2331  */
2332 static int fill_fscrypt_truncate(struct inode *inode,
2333 				 struct ceph_mds_request *req,
2334 				 struct iattr *attr)
2335 {
2336 	struct ceph_client *cl = ceph_inode_to_client(inode);
2337 	struct ceph_inode_info *ci = ceph_inode(inode);
2338 	int boff = attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE;
2339 	loff_t pos, orig_pos = round_down(attr->ia_size,
2340 					  CEPH_FSCRYPT_BLOCK_SIZE);
2341 	u64 block = orig_pos >> CEPH_FSCRYPT_BLOCK_SHIFT;
2342 	struct ceph_pagelist *pagelist = NULL;
2343 	struct kvec iov = {0};
2344 	struct iov_iter iter;
2345 	struct page *page = NULL;
2346 	struct ceph_fscrypt_truncate_size_header header;
2347 	int retry_op = 0;
2348 	int len = CEPH_FSCRYPT_BLOCK_SIZE;
2349 	loff_t i_size = i_size_read(inode);
2350 	int got, ret, issued;
2351 	u64 objver;
2352 
2353 	ret = __ceph_get_caps(inode, NULL, CEPH_CAP_FILE_RD, 0, -1, &got);
2354 	if (ret < 0)
2355 		return ret;
2356 
2357 	issued = __ceph_caps_issued(ci, NULL);
2358 
2359 	doutc(cl, "size %lld -> %lld got cap refs on %s, issued %s\n",
2360 	      i_size, attr->ia_size, ceph_cap_string(got),
2361 	      ceph_cap_string(issued));
2362 
2363 	/* Try to write back the dirty pagecache */
2364 	if (issued & (CEPH_CAP_FILE_BUFFER)) {
2365 		loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SHIFT - 1;
2366 
2367 		ret = filemap_write_and_wait_range(inode->i_mapping,
2368 						   orig_pos, lend);
2369 		if (ret < 0)
2370 			goto out;
2371 	}
2372 
2373 	page = __page_cache_alloc(GFP_KERNEL);
2374 	if (page == NULL) {
2375 		ret = -ENOMEM;
2376 		goto out;
2377 	}
2378 
2379 	pagelist = ceph_pagelist_alloc(GFP_KERNEL);
2380 	if (!pagelist) {
2381 		ret = -ENOMEM;
2382 		goto out;
2383 	}
2384 
2385 	iov.iov_base = kmap_local_page(page);
2386 	iov.iov_len = len;
2387 	iov_iter_kvec(&iter, ITER_DEST, &iov, 1, len);
2388 
2389 	pos = orig_pos;
2390 	ret = __ceph_sync_read(inode, &pos, &iter, &retry_op, &objver);
2391 	if (ret < 0)
2392 		goto out;
2393 
2394 	/* Insert the header first */
2395 	header.ver = 1;
2396 	header.compat = 1;
2397 	header.change_attr = cpu_to_le64(inode_peek_iversion_raw(inode));
2398 
2399 	/*
2400 	 * Always set the block_size to CEPH_FSCRYPT_BLOCK_SIZE,
2401 	 * because the MDS may need it to do the truncate.
2402 	 */
2403 	header.block_size = cpu_to_le32(CEPH_FSCRYPT_BLOCK_SIZE);
2404 
2405 	/*
2406 	 * If we hit a hole here, just skip filling the fscrypt
2407 	 * payload for the request: once fscrypt is enabled, the
2408 	 * file is split into blocks of CEPH_FSCRYPT_BLOCK_SIZE,
2409 	 * so any hole must be a multiple of the block size and
2410 	 * there is no last-block content to send.
2411 	 *
2412 	 * If the RADOS object doesn't exist, objver will have
2413 	 * been set to 0.
2414 	 */
2415 	if (!objver) {
2416 		doutc(cl, "hit hole, ppos %lld < size %lld\n", pos, i_size);
2417 
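		/*
		 * data_len covers change_attr (8) + file_offset (8) +
		 * block_size (4); for a hole there is no trailing block
		 * payload.
		 */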
2418 		header.data_len = cpu_to_le32(8 + 8 + 4);
2419 		header.file_offset = 0;
2420 		ret = 0;
2421 	} else {
2422 		header.data_len = cpu_to_le32(8 + 8 + 4 + CEPH_FSCRYPT_BLOCK_SIZE);
2423 		header.file_offset = cpu_to_le64(orig_pos);
2424 
2425 		doutc(cl, "encrypt block boff/bsize %d/%lu\n", boff,
2426 		      CEPH_FSCRYPT_BLOCK_SIZE);
2427 
2428 		/* truncate and zero out the extra contents for the last block */
2429 		memset(iov.iov_base + boff, 0, PAGE_SIZE - boff);
2430 
2431 		/* encrypt the last block */
2432 		ret = ceph_fscrypt_encrypt_block_inplace(inode, page,
2433 						    CEPH_FSCRYPT_BLOCK_SIZE,
2434 						    0, block,
2435 						    GFP_KERNEL);
2436 		if (ret)
2437 			goto out;
2438 	}
2439 
2440 	/* Insert the header */
2441 	ret = ceph_pagelist_append(pagelist, &header, sizeof(header));
2442 	if (ret)
2443 		goto out;
2444 
2445 	if (header.block_size) {
2446 		/* Append the last block contents to pagelist */
2447 		ret = ceph_pagelist_append(pagelist, iov.iov_base,
2448 					   CEPH_FSCRYPT_BLOCK_SIZE);
2449 		if (ret)
2450 			goto out;
2451 	}
2452 	req->r_pagelist = pagelist;
2453 out:
2454 	doutc(cl, "%p %llx.%llx size dropping cap refs on %s\n", inode,
2455 	      ceph_vinop(inode), ceph_cap_string(got));
2456 	ceph_put_cap_refs(ci, got);
2457 	if (iov.iov_base)
2458 		kunmap_local(iov.iov_base);
2459 	if (page)
2460 		__free_pages(page, 0);
2461 	if (ret && pagelist)
2462 		ceph_pagelist_release(pagelist);
2463 	return ret;
2464 }
2465 
2466 int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
2467 		   struct iattr *attr, struct ceph_iattr *cia)
2468 {
2469 	struct ceph_inode_info *ci = ceph_inode(inode);
2470 	unsigned int ia_valid = attr->ia_valid;
2471 	struct ceph_mds_request *req;
2472 	struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
2473 	struct ceph_client *cl = ceph_inode_to_client(inode);
2474 	struct ceph_cap_flush *prealloc_cf;
2475 	loff_t isize = i_size_read(inode);
2476 	int issued;
2477 	int release = 0, dirtied = 0;
2478 	int mask = 0;
2479 	int err = 0;
2480 	int inode_dirty_flags = 0;
2481 	bool lock_snap_rwsem = false;
2482 	bool fill_fscrypt;
2483 	int truncate_retry = 20; /* The RMW will take around 50ms */
2484 	struct dentry *dentry;
2485 	char *path;
2486 	int pathlen;
2487 	u64 pathbase;
2488 	bool do_sync = false;
2489 
2490 	dentry = d_find_alias(inode);
2491 	if (!dentry) {
2492 		do_sync = true;
2493 	} else {
2494 		path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase, 0);
2495 		if (IS_ERR(path)) {
2496 			do_sync = true;
2497 			err = 0;
2498 		} else {
2499 			err = ceph_mds_check_access(mdsc, path, MAY_WRITE);
2500 		}
2501 		ceph_mdsc_free_path(path, pathlen);
2502 		dput(dentry);
2503 
2504 		/* For non-EACCES errors, let the MDS do the auth check */
2505 		if (err == -EACCES) {
2506 			return err;
2507 		} else if (err < 0) {
2508 			do_sync = true;
2509 			err = 0;
2510 		}
2511 	}
2512 
2513 retry:
2514 	prealloc_cf = ceph_alloc_cap_flush();
2515 	if (!prealloc_cf)
2516 		return -ENOMEM;
2517 
2518 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2519 				       USE_AUTH_MDS);
2520 	if (IS_ERR(req)) {
2521 		ceph_free_cap_flush(prealloc_cf);
2522 		return PTR_ERR(req);
2523 	}
2524 
2525 	fill_fscrypt = false;
2526 	spin_lock(&ci->i_ceph_lock);
2527 	issued = __ceph_caps_issued(ci, NULL);
2528 
2529 	if (!ci->i_head_snapc &&
2530 	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2531 		lock_snap_rwsem = true;
2532 		if (!down_read_trylock(&mdsc->snap_rwsem)) {
2533 			spin_unlock(&ci->i_ceph_lock);
2534 			down_read(&mdsc->snap_rwsem);
2535 			spin_lock(&ci->i_ceph_lock);
2536 			issued = __ceph_caps_issued(ci, NULL);
2537 		}
2538 	}
2539 
2540 	doutc(cl, "%p %llx.%llx issued %s\n", inode, ceph_vinop(inode),
2541 	      ceph_cap_string(issued));
2542 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
2543 	if (cia && cia->fscrypt_auth) {
2544 		u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth);
2545 
2546 		if (len > sizeof(*cia->fscrypt_auth)) {
2547 			err = -EINVAL;
2548 			spin_unlock(&ci->i_ceph_lock);
2549 			goto out;
2550 		}
2551 
2552 		doutc(cl, "%p %llx.%llx fscrypt_auth len %u to %u)\n", inode,
2553 		      ceph_vinop(inode), ci->fscrypt_auth_len, len);
2554 
2555 		/* It should never be re-set once set */
2556 		WARN_ON_ONCE(ci->fscrypt_auth);
2557 
2558 		if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
2559 			dirtied |= CEPH_CAP_AUTH_EXCL;
2560 			kfree(ci->fscrypt_auth);
2561 			ci->fscrypt_auth = (u8 *)cia->fscrypt_auth;
2562 			ci->fscrypt_auth_len = len;
2563 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2564 			   ci->fscrypt_auth_len != len ||
2565 			   memcmp(ci->fscrypt_auth, cia->fscrypt_auth, len)) {
2566 			req->r_fscrypt_auth = cia->fscrypt_auth;
2567 			mask |= CEPH_SETATTR_FSCRYPT_AUTH;
2568 			release |= CEPH_CAP_AUTH_SHARED;
2569 		}
2570 		cia->fscrypt_auth = NULL;
2571 	}
2572 #else
2573 	if (cia && cia->fscrypt_auth) {
2574 		err = -EINVAL;
2575 		spin_unlock(&ci->i_ceph_lock);
2576 		goto out;
2577 	}
2578 #endif /* CONFIG_FS_ENCRYPTION */
2579 
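	/*
	 * For each attribute below the pattern is the same: if we hold
	 * the relevant EXCL cap (and aren't forced to go synchronous),
	 * apply the change locally and mark that cap dirty; otherwise,
	 * when the value actually changes, encode it into the SETATTR
	 * request and release the corresponding SHARED caps.
	 */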
2580 	if (ia_valid & ATTR_UID) {
2581 		kuid_t fsuid = from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid);
2582 
2583 		doutc(cl, "%p %llx.%llx uid %d -> %d\n", inode,
2584 		      ceph_vinop(inode),
2585 		      from_kuid(&init_user_ns, inode->i_uid),
2586 		      from_kuid(&init_user_ns, attr->ia_uid));
2587 		if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
2588 			inode->i_uid = fsuid;
2589 			dirtied |= CEPH_CAP_AUTH_EXCL;
2590 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2591 			   !uid_eq(fsuid, inode->i_uid)) {
2592 			req->r_args.setattr.uid = cpu_to_le32(
2593 				from_kuid(&init_user_ns, fsuid));
2594 			mask |= CEPH_SETATTR_UID;
2595 			release |= CEPH_CAP_AUTH_SHARED;
2596 		}
2597 	}
2598 	if (ia_valid & ATTR_GID) {
2599 		kgid_t fsgid = from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid);
2600 
2601 		doutc(cl, "%p %llx.%llx gid %d -> %d\n", inode,
2602 		      ceph_vinop(inode),
2603 		      from_kgid(&init_user_ns, inode->i_gid),
2604 		      from_kgid(&init_user_ns, attr->ia_gid));
2605 		if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
2606 			inode->i_gid = fsgid;
2607 			dirtied |= CEPH_CAP_AUTH_EXCL;
2608 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2609 			   !gid_eq(fsgid, inode->i_gid)) {
2610 			req->r_args.setattr.gid = cpu_to_le32(
2611 				from_kgid(&init_user_ns, fsgid));
2612 			mask |= CEPH_SETATTR_GID;
2613 			release |= CEPH_CAP_AUTH_SHARED;
2614 		}
2615 	}
2616 	if (ia_valid & ATTR_MODE) {
2617 		doutc(cl, "%p %llx.%llx mode 0%o -> 0%o\n", inode,
2618 		      ceph_vinop(inode), inode->i_mode, attr->ia_mode);
2619 		if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
2620 			inode->i_mode = attr->ia_mode;
2621 			dirtied |= CEPH_CAP_AUTH_EXCL;
2622 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2623 			   attr->ia_mode != inode->i_mode) {
2624 			inode->i_mode = attr->ia_mode;
2625 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2626 			mask |= CEPH_SETATTR_MODE;
2627 			release |= CEPH_CAP_AUTH_SHARED;
2628 		}
2629 	}
2630 
2631 	if (ia_valid & ATTR_ATIME) {
2632 		struct timespec64 atime = inode_get_atime(inode);
2633 
2634 		doutc(cl, "%p %llx.%llx atime %lld.%09ld -> %lld.%09ld\n",
2635 		      inode, ceph_vinop(inode),
2636 		      atime.tv_sec, atime.tv_nsec,
2637 		      attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2638 		if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
2639 			ci->i_time_warp_seq++;
2640 			inode_set_atime_to_ts(inode, attr->ia_atime);
2641 			dirtied |= CEPH_CAP_FILE_EXCL;
2642 		} else if (!do_sync && (issued & CEPH_CAP_FILE_WR) &&
2643 			   timespec64_compare(&atime,
2644 					      &attr->ia_atime) < 0) {
2645 			inode_set_atime_to_ts(inode, attr->ia_atime);
2646 			dirtied |= CEPH_CAP_FILE_WR;
2647 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2648 			   !timespec64_equal(&atime, &attr->ia_atime)) {
2649 			ceph_encode_timespec64(&req->r_args.setattr.atime,
2650 					       &attr->ia_atime);
2651 			mask |= CEPH_SETATTR_ATIME;
2652 			release |= CEPH_CAP_FILE_SHARED |
2653 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2654 		}
2655 	}
2656 	if (ia_valid & ATTR_SIZE) {
2657 		doutc(cl, "%p %llx.%llx size %lld -> %lld\n", inode,
2658 		      ceph_vinop(inode), isize, attr->ia_size);
2659 		/*
2660 		 * The RMW is needed only when the new size is smaller
2661 		 * and not aligned to CEPH_FSCRYPT_BLOCK_SIZE.
2662 		 */
2663 		if (IS_ENCRYPTED(inode) && attr->ia_size < isize &&
2664 		    (attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE)) {
2665 			mask |= CEPH_SETATTR_SIZE;
2666 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2667 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2668 			set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
2669 			mask |= CEPH_SETATTR_FSCRYPT_FILE;
2670 			req->r_args.setattr.size =
2671 				cpu_to_le64(round_up(attr->ia_size,
2672 						     CEPH_FSCRYPT_BLOCK_SIZE));
2673 			req->r_args.setattr.old_size =
2674 				cpu_to_le64(round_up(isize,
2675 						     CEPH_FSCRYPT_BLOCK_SIZE));
2676 			req->r_fscrypt_file = attr->ia_size;
2677 			fill_fscrypt = true;
2678 		} else if (!do_sync && (issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
2679 			if (attr->ia_size > isize) {
2680 				i_size_write(inode, attr->ia_size);
2681 				inode->i_blocks = calc_inode_blocks(attr->ia_size);
2682 				ci->i_reported_size = attr->ia_size;
2683 				dirtied |= CEPH_CAP_FILE_EXCL;
2684 				ia_valid |= ATTR_MTIME;
2685 			}
2686 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2687 			   attr->ia_size != isize) {
2688 			mask |= CEPH_SETATTR_SIZE;
2689 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2690 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2691 			if (IS_ENCRYPTED(inode) && attr->ia_size) {
2692 				set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
2693 				mask |= CEPH_SETATTR_FSCRYPT_FILE;
2694 				req->r_args.setattr.size =
2695 					cpu_to_le64(round_up(attr->ia_size,
2696 							     CEPH_FSCRYPT_BLOCK_SIZE));
2697 				req->r_args.setattr.old_size =
2698 					cpu_to_le64(round_up(isize,
2699 							     CEPH_FSCRYPT_BLOCK_SIZE));
2700 				req->r_fscrypt_file = attr->ia_size;
2701 			} else {
2702 				req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2703 				req->r_args.setattr.old_size = cpu_to_le64(isize);
2704 				req->r_fscrypt_file = 0;
2705 			}
2706 		}
2707 	}
2708 	if (ia_valid & ATTR_MTIME) {
2709 		struct timespec64 mtime = inode_get_mtime(inode);
2710 
2711 		doutc(cl, "%p %llx.%llx mtime %lld.%09ld -> %lld.%09ld\n",
2712 		      inode, ceph_vinop(inode),
2713 		      mtime.tv_sec, mtime.tv_nsec,
2714 		      attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2715 		if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
2716 			ci->i_time_warp_seq++;
2717 			inode_set_mtime_to_ts(inode, attr->ia_mtime);
2718 			dirtied |= CEPH_CAP_FILE_EXCL;
2719 		} else if (!do_sync && (issued & CEPH_CAP_FILE_WR) &&
2720 			   timespec64_compare(&mtime, &attr->ia_mtime) < 0) {
2721 			inode_set_mtime_to_ts(inode, attr->ia_mtime);
2722 			dirtied |= CEPH_CAP_FILE_WR;
2723 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2724 			   !timespec64_equal(&mtime, &attr->ia_mtime)) {
2725 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
2726 					       &attr->ia_mtime);
2727 			mask |= CEPH_SETATTR_MTIME;
2728 			release |= CEPH_CAP_FILE_SHARED |
2729 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2730 		}
2731 	}
2732 
2733 	/* these do nothing */
2734 	if (ia_valid & ATTR_CTIME) {
2735 		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2736 					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2737 		doutc(cl, "%p %llx.%llx ctime %lld.%09ld -> %lld.%09ld (%s)\n",
2738 		      inode, ceph_vinop(inode),
2739 		      inode_get_ctime_sec(inode),
2740 		      inode_get_ctime_nsec(inode),
2741 		      attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2742 		      only ? "ctime only" : "ignored");
2743 		if (only) {
2744 			/*
2745 			 * if kernel wants to dirty ctime but nothing else,
2746 			 * we need to choose a cap to dirty under, or do
2747 			 * an almost-no-op setattr
2748 			 */
2749 			if (issued & CEPH_CAP_AUTH_EXCL)
2750 				dirtied |= CEPH_CAP_AUTH_EXCL;
2751 			else if (issued & CEPH_CAP_FILE_EXCL)
2752 				dirtied |= CEPH_CAP_FILE_EXCL;
2753 			else if (issued & CEPH_CAP_XATTR_EXCL)
2754 				dirtied |= CEPH_CAP_XATTR_EXCL;
2755 			else
2756 				mask |= CEPH_SETATTR_CTIME;
2757 		}
2758 	}
2759 	if (ia_valid & ATTR_FILE)
2760 		doutc(cl, "%p %llx.%llx ATTR_FILE ... hrm!\n", inode,
2761 		      ceph_vinop(inode));
2762 
2763 	if (dirtied) {
2764 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2765 							   &prealloc_cf);
2766 		inode_set_ctime_to_ts(inode, attr->ia_ctime);
2767 		inode_inc_iversion_raw(inode);
2768 	}
2769 
2770 	release &= issued;
2771 	spin_unlock(&ci->i_ceph_lock);
2772 	if (lock_snap_rwsem) {
2773 		up_read(&mdsc->snap_rwsem);
2774 		lock_snap_rwsem = false;
2775 	}
2776 
2777 	if (inode_dirty_flags)
2778 		__mark_inode_dirty(inode, inode_dirty_flags);
2779 
2780 	if (mask) {
2781 		req->r_inode = inode;
2782 		ihold(inode);
2783 		req->r_inode_drop = release;
2784 		req->r_args.setattr.mask = cpu_to_le32(mask);
2785 		req->r_num_caps = 1;
2786 		req->r_stamp = attr->ia_ctime;
2787 		if (fill_fscrypt) {
2788 			err = fill_fscrypt_truncate(inode, req, attr);
2789 			if (err)
2790 				goto out;
2791 		}
2792 
2793 		/*
2794 		 * The truncate request will return -EAGAIN when the
2795 		 * last block has been updated just before the MDS
2796 		 * successfully gets the xlock for the FILE lock. To
2797 		 * avoid corrupting the file contents we need to retry
2798 		 * it.
2799 		 */
2800 		err = ceph_mdsc_do_request(mdsc, NULL, req);
2801 		if (err == -EAGAIN && truncate_retry--) {
2802 			doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote), retry it!\n",
2803 			      inode, ceph_vinop(inode), err,
2804 			      ceph_cap_string(dirtied), mask);
2805 			ceph_mdsc_put_request(req);
2806 			ceph_free_cap_flush(prealloc_cf);
2807 			goto retry;
2808 		}
2809 	}
2810 out:
2811 	doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote)\n", inode,
2812 	      ceph_vinop(inode), err, ceph_cap_string(dirtied), mask);
2813 
2814 	ceph_mdsc_put_request(req);
2815 	ceph_free_cap_flush(prealloc_cf);
2816 
2817 	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2818 		__ceph_do_pending_vmtruncate(inode);
2819 
2820 	return err;
2821 }
2822 
2823 /*
2824  * setattr
2825  */
2826 int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
2827 		 struct iattr *attr)
2828 {
2829 	struct inode *inode = d_inode(dentry);
2830 	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2831 	int err;
2832 
2833 	if (ceph_snap(inode) != CEPH_NOSNAP)
2834 		return -EROFS;
2835 
2836 	if (ceph_inode_is_shutdown(inode))
2837 		return -ESTALE;
2838 
2839 	err = fscrypt_prepare_setattr(dentry, attr);
2840 	if (err)
2841 		return err;
2842 
2843 	err = setattr_prepare(idmap, dentry, attr);
2844 	if (err != 0)
2845 		return err;
2846 
2847 	if ((attr->ia_valid & ATTR_SIZE) &&
2848 	    attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
2849 		return -EFBIG;
2850 
2851 	if ((attr->ia_valid & ATTR_SIZE) &&
2852 	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2853 		return -EDQUOT;
2854 
2855 	err = __ceph_setattr(idmap, inode, attr, NULL);
2856 
2857 	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2858 		err = posix_acl_chmod(idmap, dentry, attr->ia_mode);
2859 
2860 	return err;
2861 }
2862 
2863 int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
2864 {
2865 	int issued = ceph_caps_issued(ceph_inode(inode));
2866 
2867 	/*
2868 	 * If any 'x' caps are issued we can just choose the auth MDS
2869 	 * instead of a random replica MDS, because only when the Locker
2870 	 * is in the LOCK_EXEC state can the loner client get 'x' caps.
2871 	 * If we send a getattr request to a replica MDS instead, that
2872 	 * MDS must auth-pin and try to rdlock from the auth MDS, which
2873 	 * forces the auth MDS to transition the Locker state to
2874 	 * LOCK_SYNC, after which the lock state changes back again.
2875 	 *
2876 	 * These Locker state transitions are expensive and usually
2877 	 * require revoking caps from clients.
2878 	 *
2879 	 * For the 'Xs' caps needed by getxattr we also choose the auth
2880 	 * MDS, because the MDS side code is buggy: setxattr doesn't
2881 	 * notify the replica MDSes when values change, so a replica
2882 	 * may return stale values. Though this will be fixed in the
2883 	 * MDS code, choosing the auth MDS still makes sense for old
2884 	 * ceph clusters.
2885 	 */
2886 	if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
2887 	    || (mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR)))
2888 		return USE_AUTH_MDS;
2889 	else
2890 		return USE_ANY_MDS;
2891 }
2892 
2893 /*
2894  * Verify that we have a lease on the given mask.  If not,
2895  * do a getattr against an mds.
2896  */
2897 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2898 		      int mask, bool force)
2899 {
2900 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
2901 	struct ceph_client *cl = fsc->client;
2902 	struct ceph_mds_client *mdsc = fsc->mdsc;
2903 	struct ceph_mds_request *req;
2904 	int mode;
2905 	int err;
2906 
2907 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
2908 		doutc(cl, "inode %p %llx.%llx SNAPDIR\n", inode,
2909 		      ceph_vinop(inode));
2910 		return 0;
2911 	}
2912 
2913 	doutc(cl, "inode %p %llx.%llx mask %s mode 0%o\n", inode,
2914 	      ceph_vinop(inode), ceph_cap_string(mask), inode->i_mode);
2915 	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2916 		return 0;
2917 
2918 	mode = ceph_try_to_choose_auth_mds(inode, mask);
2919 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2920 	if (IS_ERR(req))
2921 		return PTR_ERR(req);
2922 	req->r_inode = inode;
2923 	ihold(inode);
2924 	req->r_num_caps = 1;
2925 	req->r_args.getattr.mask = cpu_to_le32(mask);
2926 	req->r_locked_page = locked_page;
2927 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2928 	if (locked_page && err == 0) {
2929 		u64 inline_version = req->r_reply_info.targeti.inline_version;
2930 		if (inline_version == 0) {
2931 			/* the reply is supposed to contain inline data */
2932 			err = -EINVAL;
2933 		} else if (inline_version == CEPH_INLINE_NONE ||
2934 			   inline_version == 1) {
2935 			err = -ENODATA;
2936 		} else {
2937 			err = req->r_reply_info.targeti.inline_len;
2938 		}
2939 	}
2940 	ceph_mdsc_put_request(req);
2941 	doutc(cl, "result=%d\n", err);
2942 	return err;
2943 }
2944 
2945 int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
2946 		      size_t size)
2947 {
2948 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
2949 	struct ceph_client *cl = fsc->client;
2950 	struct ceph_mds_client *mdsc = fsc->mdsc;
2951 	struct ceph_mds_request *req;
2952 	int mode = USE_AUTH_MDS;
2953 	int err;
2954 	char *xattr_value;
2955 	size_t xattr_value_len;
2956 
2957 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode);
2958 	if (IS_ERR(req)) {
2959 		err = -ENOMEM;
2960 		goto out;
2961 	}
2962 
2963 	req->r_feature_needed = CEPHFS_FEATURE_OP_GETVXATTR;
2964 	req->r_path2 = kstrdup(name, GFP_NOFS);
2965 	if (!req->r_path2) {
2966 		err = -ENOMEM;
2967 		goto put;
2968 	}
2969 
2970 	ihold(inode);
2971 	req->r_inode = inode;
2972 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2973 	if (err < 0)
2974 		goto put;
2975 
2976 	xattr_value = req->r_reply_info.xattr_info.xattr_value;
2977 	xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
2978 
2979 	doutc(cl, "xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
2980 
2981 	err = (int)xattr_value_len;
2982 	if (size == 0)
2983 		goto put;
2984 
2985 	if (xattr_value_len > size) {
2986 		err = -ERANGE;
2987 		goto put;
2988 	}
2989 
2990 	memcpy(value, xattr_value, xattr_value_len);
2991 put:
2992 	ceph_mdsc_put_request(req);
2993 out:
2994 	doutc(cl, "result=%d\n", err);
2995 	return err;
2996 }
2997 
2998 
2999 /*
3000  * Check inode permissions.  We verify we have a valid value for
3001  * the AUTH cap, then call the generic handler.
3002  */
3003 int ceph_permission(struct mnt_idmap *idmap, struct inode *inode,
3004 		    int mask)
3005 {
3006 	int err;
3007 
3008 	if (mask & MAY_NOT_BLOCK)
3009 		return -ECHILD;
3010 
3011 	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
3012 
3013 	if (!err)
3014 		err = generic_permission(idmap, inode, mask);
3015 	return err;
3016 }
3017 
3018 /* Craft a mask of needed caps given a set of requested statx attrs. */
3019 static int statx_to_caps(u32 want, umode_t mode)
3020 {
3021 	int mask = 0;
3022 
3023 	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME|STATX_CHANGE_COOKIE))
3024 		mask |= CEPH_CAP_AUTH_SHARED;
3025 
3026 	if (want & (STATX_NLINK|STATX_CTIME|STATX_CHANGE_COOKIE)) {
3027 		/*
3028 		 * The link count for directories depends on inode->i_subdirs,
3029 		 * and that is only updated when Fs caps are held.
3030 		 */
3031 		if (S_ISDIR(mode))
3032 			mask |= CEPH_CAP_FILE_SHARED;
3033 		else
3034 			mask |= CEPH_CAP_LINK_SHARED;
3035 	}
3036 
3037 	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|STATX_BLOCKS|STATX_CHANGE_COOKIE))
3038 		mask |= CEPH_CAP_FILE_SHARED;
3039 
3040 	if (want & (STATX_CTIME|STATX_CHANGE_COOKIE))
3041 		mask |= CEPH_CAP_XATTR_SHARED;
3042 
3043 	return mask;
3044 }
3045 
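/*
 * For example, a statx() asking only for STATX_BTIME needs just
 * CEPH_CAP_AUTH_SHARED, while STATX_CTIME on a regular file needs the
 * AUTH, LINK, FILE and XATTR SHARED caps, since ctime moves whenever
 * any of that metadata changes.
 */
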
3046 /*
3047  * Get all the attributes. If we have sufficient caps for the requested attrs,
3048  * then we can avoid talking to the MDS at all.
3049  */
3050 int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
3051 		 struct kstat *stat, u32 request_mask, unsigned int flags)
3052 {
3053 	struct inode *inode = d_inode(path->dentry);
3054 	struct super_block *sb = inode->i_sb;
3055 	struct ceph_inode_info *ci = ceph_inode(inode);
3056 	u32 valid_mask = STATX_BASIC_STATS;
3057 	int err = 0;
3058 
3059 	if (ceph_inode_is_shutdown(inode))
3060 		return -ESTALE;
3061 
3062 	/* Skip the getattr altogether if we're asked not to sync */
3063 	if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
3064 		err = ceph_do_getattr(inode,
3065 				statx_to_caps(request_mask, inode->i_mode),
3066 				flags & AT_STATX_FORCE_SYNC);
3067 		if (err)
3068 			return err;
3069 	}
3070 
3071 	generic_fillattr(idmap, request_mask, inode, stat);
3072 	stat->ino = ceph_present_inode(inode);
3073 
3074 	/*
3075 	 * btime on newly-allocated inodes is 0, so if this is still set to
3076 	 * that, then assume that it's not valid.
3077 	 */
3078 	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
3079 		stat->btime = ci->i_btime;
3080 		valid_mask |= STATX_BTIME;
3081 	}
3082 
3083 	if (request_mask & STATX_CHANGE_COOKIE) {
3084 		stat->change_cookie = inode_peek_iversion_raw(inode);
3085 		valid_mask |= STATX_CHANGE_COOKIE;
3086 	}
3087 
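	/*
	 * Snapshot inodes report a distinct anonymous dev allocated per
	 * snapid (or 0 if no mapping exists) instead of the superblock
	 * dev.
	 */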
3088 	if (ceph_snap(inode) == CEPH_NOSNAP)
3089 		stat->dev = sb->s_dev;
3090 	else
3091 		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
3092 
3093 	if (S_ISDIR(inode->i_mode)) {
3094 		if (ceph_test_mount_opt(ceph_sb_to_fs_client(sb), RBYTES)) {
3095 			stat->size = ci->i_rbytes;
3096 		} else if (ceph_snap(inode) == CEPH_SNAPDIR) {
3097 			struct ceph_inode_info *pci;
3098 			struct ceph_snap_realm *realm;
3099 			struct inode *parent;
3100 
3101 			parent = ceph_lookup_inode(sb, ceph_ino(inode));
3102 			if (IS_ERR(parent))
3103 				return PTR_ERR(parent);
3104 
3105 			pci = ceph_inode(parent);
3106 			spin_lock(&pci->i_ceph_lock);
3107 			realm = pci->i_snap_realm;
3108 			if (realm)
3109 				stat->size = realm->num_snaps;
3110 			else
3111 				stat->size = 0;
3112 			spin_unlock(&pci->i_ceph_lock);
3113 			iput(parent);
3114 		} else {
3115 			stat->size = ci->i_files + ci->i_subdirs;
3116 		}
3117 		stat->blocks = 0;
3118 		stat->blksize = 65536;
3119 		/*
3120 		 * Some applications rely on the st_nlink value of
3121 		 * directories being either 0 (if unlinked)
3122 		 * or 2 + number of subdirectories.
3123 		 */
3124 		if (stat->nlink == 1)
3125 			/* '.' + '..' + subdirs */
3126 			stat->nlink = 1 + 1 + ci->i_subdirs;
3127 	}
3128 
3129 	stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
3130 	if (IS_ENCRYPTED(inode))
3131 		stat->attributes |= STATX_ATTR_ENCRYPTED;
3132 	stat->attributes_mask |= (STATX_ATTR_CHANGE_MONOTONIC |
3133 				  STATX_ATTR_ENCRYPTED);
3134 
3135 	stat->result_mask = request_mask & valid_mask;
3136 	return err;
3137 }
3138 
3139 void ceph_inode_shutdown(struct inode *inode)
3140 {
3141 	struct ceph_inode_info *ci = ceph_inode(inode);
3142 	struct rb_node *p;
3143 	int iputs = 0;
3144 	bool invalidate = false;
3145 
3146 	spin_lock(&ci->i_ceph_lock);
3147 	ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
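	/*
	 * Purge every cap attached to the inode.  Each purged cap may
	 * require an iput, and may ask for the pagecache to be
	 * invalidated; both are deferred until i_ceph_lock is dropped.
	 */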
3148 	p = rb_first(&ci->i_caps);
3149 	while (p) {
3150 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
3151 
3152 		p = rb_next(p);
3153 		iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
3154 	}
3155 	spin_unlock(&ci->i_ceph_lock);
3156 
3157 	if (invalidate)
3158 		ceph_queue_invalidate(inode);
3159 	while (iputs--)
3160 		iput(inode);
3161 }
3162