xref: /linux/fs/ceph/inode.c (revision 8bd9238e511d02831022ff0270865c54ccc482d6)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/module.h>
5 #include <linux/fs.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
17 #include <linux/fscrypt.h>
18 
19 #include "super.h"
20 #include "mds_client.h"
21 #include "cache.h"
22 #include "crypto.h"
23 #include <linux/ceph/decode.h>
24 
25 /*
26  * Ceph inode operations
27  *
28  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
29  * setattr, etc.), xattr helpers, and helpers for assimilating
30  * metadata returned by the MDS into our cache.
31  *
32  * Also define helpers for doing asynchronous writeback, invalidation,
33  * and truncation for the benefit of those who can't afford to block
34  * (typically because they are in the message handler path).
35  */
36 
37 static const struct inode_operations ceph_symlink_iops;
38 static const struct inode_operations ceph_encrypted_symlink_iops;
39 
40 static void ceph_inode_work(struct work_struct *work);
41 
42 /*
43  * find or create an inode, given the ceph ino number
44  */
45 static int ceph_set_ino_cb(struct inode *inode, void *data)
46 {
47 	struct ceph_inode_info *ci = ceph_inode(inode);
48 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
49 
50 	ci->i_vino = *(struct ceph_vino *)data;
51 	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
52 	inode_set_iversion_raw(inode, 0);
53 	percpu_counter_inc(&mdsc->metric.total_inodes);
54 
55 	return 0;
56 }
57 
58 /*
59  * Check if the parent inode matches the vino from directory reply info
60  */
61 static inline bool ceph_vino_matches_parent(struct inode *parent,
62 					    struct ceph_vino vino)
63 {
64 	return ceph_ino(parent) == vino.ino && ceph_snap(parent) == vino.snap;
65 }
66 
67 /*
68  * Validate that the directory inode referenced by @req->r_parent matches the
69  * inode number and snapshot id contained in the reply's directory record.  If
70  * they do not match – which can theoretically happen if the parent dentry was
71  * moved between the time the request was issued and the reply arrived – fall
72  * back to looking up the correct inode in the inode cache.
73  *
74  * A reference is *always* returned.  Callers that receive a different inode
75  * than the original @parent are responsible for dropping the extra reference
76  * once the reply has been processed.
77  */
78 static struct inode *ceph_get_reply_dir(struct super_block *sb,
79 					struct inode *parent,
80 					struct ceph_mds_reply_info_parsed *rinfo)
81 {
82 	struct ceph_vino vino;
83 
84 	if (unlikely(!rinfo->diri.in))
85 		return parent; /* nothing to compare against */
86 
87 	/* If we didn't have a cached parent inode to begin with, just bail out. */
88 	if (!parent)
89 		return NULL;
90 
91 	vino.ino  = le64_to_cpu(rinfo->diri.in->ino);
92 	vino.snap = le64_to_cpu(rinfo->diri.in->snapid);
93 
94 	if (likely(ceph_vino_matches_parent(parent, vino)))
95 		return parent; /* matches – use the original reference */
96 
97 	/* Mismatch – this should be rare.  Emit a WARN and obtain the correct inode. */
98 	WARN_ONCE(1, "ceph: reply dir mismatch (parent valid %llx.%llx reply %llx.%llx)\n",
99 		  ceph_ino(parent), ceph_snap(parent), vino.ino, vino.snap);
100 
101 	return ceph_get_inode(sb, vino, NULL);
102 }
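/*
 * Usage sketch (hypothetical caller, not part of this file): the caller
 * must compare the returned inode against the @parent it passed in and
 * drop the extra reference when they differ, e.g.:
 *
 *	dir = ceph_get_reply_dir(sb, req->r_parent, rinfo);
 *	if (IS_ERR(dir))
 *		return PTR_ERR(dir);
 *	...use dir to process the reply...
 *	if (dir != req->r_parent)
 *		iput(dir);
 */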
103 
104 /**
105  * ceph_new_inode - allocate a new inode in advance of an expected create
106  * @dir: parent directory for new inode
107  * @dentry: dentry that may eventually point to new inode
108  * @mode: mode of new inode
109  * @as_ctx: pointer to inherited security context
110  *
111  * Allocate a new inode in advance of an operation to create a new inode.
112  * This allocates the inode and sets up the acl_sec_ctx with appropriate
113  * info for the new inode.
114  *
115  * Returns a pointer to the new inode or an ERR_PTR.
116  */
117 struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
118 			     umode_t *mode, struct ceph_acl_sec_ctx *as_ctx)
119 {
120 	int err;
121 	struct inode *inode;
122 
123 	inode = new_inode(dir->i_sb);
124 	if (!inode)
125 		return ERR_PTR(-ENOMEM);
126 
127 	inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
128 
129 	if (!S_ISLNK(*mode)) {
130 		err = ceph_pre_init_acls(dir, mode, as_ctx);
131 		if (err < 0)
132 			goto out_err;
133 	}
134 
135 	inode->i_state = 0;
136 	inode->i_mode = *mode;
137 
138 	err = ceph_security_init_secctx(dentry, *mode, as_ctx);
139 	if (err < 0)
140 		goto out_err;
141 
142 	/*
143 	 * We'll skip setting the fscrypt context for snapshots, leaving
144 	 * that for handle_reply().
145 	 */
146 	if (ceph_snap(dir) != CEPH_SNAPDIR) {
147 		err = ceph_fscrypt_prepare_context(dir, inode, as_ctx);
148 		if (err)
149 			goto out_err;
150 	}
151 
152 	return inode;
153 out_err:
154 	iput(inode);
155 	return ERR_PTR(err);
156 }
157 
158 void ceph_as_ctx_to_req(struct ceph_mds_request *req,
159 			struct ceph_acl_sec_ctx *as_ctx)
160 {
161 	if (as_ctx->pagelist) {
162 		req->r_pagelist = as_ctx->pagelist;
163 		as_ctx->pagelist = NULL;
164 	}
165 	ceph_fscrypt_as_ctx_to_req(req, as_ctx);
166 }
167 
168 /**
169  * ceph_get_inode - find or create/hash a new inode
170  * @sb: superblock to search and allocate in
171  * @vino: vino to search for
172  * @newino: optional new inode to insert if one isn't found (may be NULL)
173  *
174  * Search for or insert a new inode into the hash for the given vino, and
175  * return a reference to it. If new is non-NULL, its reference is consumed.
176  */
177 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
178 			     struct inode *newino)
179 {
180 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
181 	struct ceph_client *cl = mdsc->fsc->client;
182 	struct inode *inode;
183 
184 	if (ceph_vino_is_reserved(vino))
185 		return ERR_PTR(-EREMOTEIO);
186 
187 	if (newino) {
188 		inode = inode_insert5(newino, (unsigned long)vino.ino,
189 				      ceph_ino_compare, ceph_set_ino_cb, &vino);
190 		if (inode != newino)
191 			iput(newino);
192 	} else {
193 		inode = iget5_locked(sb, (unsigned long)vino.ino,
194 				     ceph_ino_compare, ceph_set_ino_cb, &vino);
195 	}
196 
197 	if (!inode) {
198 		doutc(cl, "no inode found for %llx.%llx\n", vino.ino, vino.snap);
199 		return ERR_PTR(-ENOMEM);
200 	}
201 
202 	doutc(cl, "on %llx=%llx.%llx got %p new %d\n",
203 	      ceph_present_inode(inode), ceph_vinop(inode), inode,
204 	      !!(inode->i_state & I_NEW));
205 	return inode;
206 }
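/*
 * Example (sketch): a plain lookup by vino.  If the returned inode is
 * I_NEW, the caller is expected to fill it in and then call
 * unlock_new_inode() (or discard_new_inode() on error), as
 * ceph_fill_trace() does below.
 *
 *	struct ceph_vino vino = { .ino = ino, .snap = CEPH_NOSNAP };
 *	struct inode *inode = ceph_get_inode(sb, vino, NULL);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 */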
207 
208 /*
209  * get/construct snapdir inode for a given directory
210  */
211 struct inode *ceph_get_snapdir(struct inode *parent)
212 {
213 	struct ceph_client *cl = ceph_inode_to_client(parent);
214 	struct ceph_vino vino = {
215 		.ino = ceph_ino(parent),
216 		.snap = CEPH_SNAPDIR,
217 	};
218 	struct inode *inode = ceph_get_inode(parent->i_sb, vino, NULL);
219 	struct ceph_inode_info *ci = ceph_inode(inode);
220 	int ret = -ENOTDIR;
221 
222 	if (IS_ERR(inode))
223 		return inode;
224 
225 	if (!S_ISDIR(parent->i_mode)) {
226 		pr_warn_once_client(cl, "bad snapdir parent type (mode=0%o)\n",
227 				    parent->i_mode);
228 		goto err;
229 	}
230 
231 	if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
232 		pr_warn_once_client(cl, "bad snapdir inode type (mode=0%o)\n",
233 				    inode->i_mode);
234 		goto err;
235 	}
236 
237 	inode->i_mode = parent->i_mode;
238 	inode->i_uid = parent->i_uid;
239 	inode->i_gid = parent->i_gid;
240 	inode_set_mtime_to_ts(inode, inode_get_mtime(parent));
241 	inode_set_ctime_to_ts(inode, inode_get_ctime(parent));
242 	inode_set_atime_to_ts(inode, inode_get_atime(parent));
243 	ci->i_rbytes = 0;
244 	ci->i_btime = ceph_inode(parent)->i_btime;
245 
246 #ifdef CONFIG_FS_ENCRYPTION
247 	/* if encrypted, just borrow fscrypt_auth from parent */
248 	if (IS_ENCRYPTED(parent)) {
249 		struct ceph_inode_info *pci = ceph_inode(parent);
250 
251 		ci->fscrypt_auth = kmemdup(pci->fscrypt_auth,
252 					   pci->fscrypt_auth_len,
253 					   GFP_KERNEL);
254 		if (ci->fscrypt_auth) {
255 			inode->i_flags |= S_ENCRYPTED;
256 			ci->fscrypt_auth_len = pci->fscrypt_auth_len;
257 		} else {
258 			doutc(cl, "Failed to alloc snapdir fscrypt_auth\n");
259 			ret = -ENOMEM;
260 			goto err;
261 		}
262 	}
263 #endif
264 	if (inode->i_state & I_NEW) {
265 		inode->i_op = &ceph_snapdir_iops;
266 		inode->i_fop = &ceph_snapdir_fops;
267 		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
268 		unlock_new_inode(inode);
269 	}
270 
271 	return inode;
272 err:
273 	if ((inode->i_state & I_NEW))
274 		discard_new_inode(inode);
275 	else
276 		iput(inode);
277 	return ERR_PTR(ret);
278 }
279 
280 const struct inode_operations ceph_file_iops = {
281 	.permission = ceph_permission,
282 	.setattr = ceph_setattr,
283 	.getattr = ceph_getattr,
284 	.listxattr = ceph_listxattr,
285 	.get_inode_acl = ceph_get_acl,
286 	.set_acl = ceph_set_acl,
287 };
288 
289 
290 /*
291  * We use a 'frag tree' to keep track of the MDS's directory fragments
292  * for a given inode (usually there is just a single fragment).  We
293  * need to know when a child frag is delegated to a new MDS, or when
294  * it is flagged as replicated, so we can direct our requests
295  * accordingly.
296  */
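/*
 * Encoding sketch (see include/linux/ceph/ceph_frag.h): a frag is a
 * 32-bit value whose top 8 bits hold the number of significant bits
 * and whose low 24 bits hold the hash-prefix value.  The root frag is
 * ceph_frag_make(0, 0) == 0x0; splitting it by 1 yields
 * ceph_frag_make(1, 0x000000) == 0x1000000 and
 * ceph_frag_make(1, 0x800000) == 0x1800000, covering hash values with
 * a leading 0 or 1 bit respectively.
 */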
297 
298 /*
299  * find/create a frag in the tree
300  */
301 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
302 						    u32 f)
303 {
304 	struct inode *inode = &ci->netfs.inode;
305 	struct ceph_client *cl = ceph_inode_to_client(inode);
306 	struct rb_node **p;
307 	struct rb_node *parent = NULL;
308 	struct ceph_inode_frag *frag;
309 	int c;
310 
311 	p = &ci->i_fragtree.rb_node;
312 	while (*p) {
313 		parent = *p;
314 		frag = rb_entry(parent, struct ceph_inode_frag, node);
315 		c = ceph_frag_compare(f, frag->frag);
316 		if (c < 0)
317 			p = &(*p)->rb_left;
318 		else if (c > 0)
319 			p = &(*p)->rb_right;
320 		else
321 			return frag;
322 	}
323 
324 	frag = kmalloc(sizeof(*frag), GFP_NOFS);
325 	if (!frag)
326 		return ERR_PTR(-ENOMEM);
327 
328 	frag->frag = f;
329 	frag->split_by = 0;
330 	frag->mds = -1;
331 	frag->ndist = 0;
332 
333 	rb_link_node(&frag->node, parent, p);
334 	rb_insert_color(&frag->node, &ci->i_fragtree);
335 
336 	doutc(cl, "added %p %llx.%llx frag %x\n", inode, ceph_vinop(inode), f);
337 	return frag;
338 }
339 
340 /*
341  * find a specific frag @f
342  */
343 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
344 {
345 	struct rb_node *n = ci->i_fragtree.rb_node;
346 
347 	while (n) {
348 		struct ceph_inode_frag *frag =
349 			rb_entry(n, struct ceph_inode_frag, node);
350 		int c = ceph_frag_compare(f, frag->frag);
351 		if (c < 0)
352 			n = n->rb_left;
353 		else if (c > 0)
354 			n = n->rb_right;
355 		else
356 			return frag;
357 	}
358 	return NULL;
359 }
360 
361 /*
362  * Choose frag containing the given value @v.  If @pfrag is
363  * specified, copy the frag delegation info to the caller if
364  * it is present.
365  */
366 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
367 			      struct ceph_inode_frag *pfrag, int *found)
368 {
369 	struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
370 	u32 t = ceph_frag_make(0, 0);
371 	struct ceph_inode_frag *frag;
372 	unsigned nway, i;
373 	u32 n;
374 
375 	if (found)
376 		*found = 0;
377 
378 	while (1) {
379 		WARN_ON(!ceph_frag_contains_value(t, v));
380 		frag = __ceph_find_frag(ci, t);
381 		if (!frag)
382 			break; /* t is a leaf */
383 		if (frag->split_by == 0) {
384 			if (pfrag)
385 				memcpy(pfrag, frag, sizeof(*pfrag));
386 			if (found)
387 				*found = 1;
388 			break;
389 		}
390 
391 		/* choose child */
392 		nway = 1 << frag->split_by;
393 		doutc(cl, "frag(%x) %x splits by %d (%d ways)\n", v, t,
394 		      frag->split_by, nway);
395 		for (i = 0; i < nway; i++) {
396 			n = ceph_frag_make_child(t, frag->split_by, i);
397 			if (ceph_frag_contains_value(n, v)) {
398 				t = n;
399 				break;
400 			}
401 		}
402 		BUG_ON(i == nway);
403 	}
404 	doutc(cl, "frag(%x) = %x\n", v, t);
405 
406 	return t;
407 }
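/*
 * Worked example (sketch): with the root split by 1 and 0x1800000
 * split by 1 again, choosing the frag for a hash value whose top two
 * bits are 11 (e.g. 0xc00000) walks 0x0 -> 0x1800000 -> 0x2c00000 and
 * returns 0x2c00000 as the containing leaf.
 */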
408 
409 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
410 		     struct ceph_inode_frag *pfrag, int *found)
411 {
412 	u32 ret;
413 	mutex_lock(&ci->i_fragtree_mutex);
414 	ret = __ceph_choose_frag(ci, v, pfrag, found);
415 	mutex_unlock(&ci->i_fragtree_mutex);
416 	return ret;
417 }
418 
419 /*
420  * Process dirfrag (delegation) info from the mds.  Include leaf
421  * fragment in tree ONLY if ndist > 0.  Otherwise, only
422  * branches/splits are included in i_fragtree)
423  */
424 static int ceph_fill_dirfrag(struct inode *inode,
425 			     struct ceph_mds_reply_dirfrag *dirinfo)
426 {
427 	struct ceph_inode_info *ci = ceph_inode(inode);
428 	struct ceph_client *cl = ceph_inode_to_client(inode);
429 	struct ceph_inode_frag *frag;
430 	u32 id = le32_to_cpu(dirinfo->frag);
431 	int mds = le32_to_cpu(dirinfo->auth);
432 	int ndist = le32_to_cpu(dirinfo->ndist);
433 	int diri_auth = -1;
434 	int i;
435 	int err = 0;
436 
437 	spin_lock(&ci->i_ceph_lock);
438 	if (ci->i_auth_cap)
439 		diri_auth = ci->i_auth_cap->mds;
440 	spin_unlock(&ci->i_ceph_lock);
441 
442 	if (mds == -1) /* CDIR_AUTH_PARENT */
443 		mds = diri_auth;
444 
445 	mutex_lock(&ci->i_fragtree_mutex);
446 	if (ndist == 0 && mds == diri_auth) {
447 		/* no delegation info needed. */
448 		frag = __ceph_find_frag(ci, id);
449 		if (!frag)
450 			goto out;
451 		if (frag->split_by == 0) {
452 			/* tree leaf, remove */
453 			doutc(cl, "removed %p %llx.%llx frag %x (no ref)\n",
454 			      inode, ceph_vinop(inode), id);
455 			rb_erase(&frag->node, &ci->i_fragtree);
456 			kfree(frag);
457 		} else {
458 			/* tree branch, keep and clear */
459 			doutc(cl, "cleared %p %llx.%llx frag %x referral\n",
460 			      inode, ceph_vinop(inode), id);
461 			frag->mds = -1;
462 			frag->ndist = 0;
463 		}
464 		goto out;
465 	}
466 
467 
468 	/* find/add this frag to store mds delegation info */
469 	frag = __get_or_create_frag(ci, id);
470 	if (IS_ERR(frag)) {
471 		/* this is not the end of the world; we can continue
472 		   with bad/inaccurate delegation info */
473 		pr_err_client(cl, "ENOMEM on mds ref %p %llx.%llx fg %x\n",
474 			      inode, ceph_vinop(inode),
475 			      le32_to_cpu(dirinfo->frag));
476 		err = -ENOMEM;
477 		goto out;
478 	}
479 
480 	frag->mds = mds;
481 	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
482 	for (i = 0; i < frag->ndist; i++)
483 		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
484 	doutc(cl, "%p %llx.%llx frag %x ndist=%d\n", inode,
485 	      ceph_vinop(inode), frag->frag, frag->ndist);
486 
487 out:
488 	mutex_unlock(&ci->i_fragtree_mutex);
489 	return err;
490 }
491 
492 static int frag_tree_split_cmp(const void *l, const void *r)
493 {
494 	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
495 	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
496 	return ceph_frag_compare(le32_to_cpu(ls->frag),
497 				 le32_to_cpu(rs->frag));
498 }
499 
500 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
501 {
502 	if (!frag)
503 		return f == ceph_frag_make(0, 0);
504 	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
505 		return false;
506 	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
507 }
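/*
 * e.g. (sketch): if @frag is 0x1000000 (1 bit, prefix 0) with
 * split_by == 1, its children are the 2-bit frags 0x2000000 and
 * 0x2400000; is_frag_child() returns true exactly for those two
 * values.
 */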
508 
509 static int ceph_fill_fragtree(struct inode *inode,
510 			      struct ceph_frag_tree_head *fragtree,
511 			      struct ceph_mds_reply_dirfrag *dirinfo)
512 {
513 	struct ceph_client *cl = ceph_inode_to_client(inode);
514 	struct ceph_inode_info *ci = ceph_inode(inode);
515 	struct ceph_inode_frag *frag, *prev_frag = NULL;
516 	struct rb_node *rb_node;
517 	unsigned i, split_by, nsplits;
518 	u32 id;
519 	bool update = false;
520 
521 	mutex_lock(&ci->i_fragtree_mutex);
522 	nsplits = le32_to_cpu(fragtree->nsplits);
523 	if (nsplits != ci->i_fragtree_nsplits) {
524 		update = true;
525 	} else if (nsplits) {
526 		i = get_random_u32_below(nsplits);
527 		id = le32_to_cpu(fragtree->splits[i].frag);
528 		if (!__ceph_find_frag(ci, id))
529 			update = true;
530 	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
531 		rb_node = rb_first(&ci->i_fragtree);
532 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
533 		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
534 			update = true;
535 	}
536 	if (!update && dirinfo) {
537 		id = le32_to_cpu(dirinfo->frag);
538 		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
539 			update = true;
540 	}
541 	if (!update)
542 		goto out_unlock;
543 
544 	if (nsplits > 1) {
545 		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
546 		     frag_tree_split_cmp, NULL);
547 	}
548 
549 	doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
550 	rb_node = rb_first(&ci->i_fragtree);
551 	for (i = 0; i < nsplits; i++) {
552 		id = le32_to_cpu(fragtree->splits[i].frag);
553 		split_by = le32_to_cpu(fragtree->splits[i].by);
554 		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
555 			pr_err_client(cl, "%p %llx.%llx invalid split %d/%u, "
556 			       "frag %x split by %d\n", inode,
557 			       ceph_vinop(inode), i, nsplits, id, split_by);
558 			continue;
559 		}
560 		frag = NULL;
561 		while (rb_node) {
562 			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
563 			if (ceph_frag_compare(frag->frag, id) >= 0) {
564 				if (frag->frag != id)
565 					frag = NULL;
566 				else
567 					rb_node = rb_next(rb_node);
568 				break;
569 			}
570 			rb_node = rb_next(rb_node);
571 			/* delete stale split/leaf node */
572 			if (frag->split_by > 0 ||
573 			    !is_frag_child(frag->frag, prev_frag)) {
574 				rb_erase(&frag->node, &ci->i_fragtree);
575 				if (frag->split_by > 0)
576 					ci->i_fragtree_nsplits--;
577 				kfree(frag);
578 			}
579 			frag = NULL;
580 		}
581 		if (!frag) {
582 			frag = __get_or_create_frag(ci, id);
583 			if (IS_ERR(frag))
584 				continue;
585 		}
586 		if (frag->split_by == 0)
587 			ci->i_fragtree_nsplits++;
588 		frag->split_by = split_by;
589 		doutc(cl, " frag %x split by %d\n", frag->frag, frag->split_by);
590 		prev_frag = frag;
591 	}
592 	while (rb_node) {
593 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
594 		rb_node = rb_next(rb_node);
595 		/* delete stale split/leaf node */
596 		if (frag->split_by > 0 ||
597 		    !is_frag_child(frag->frag, prev_frag)) {
598 			rb_erase(&frag->node, &ci->i_fragtree);
599 			if (frag->split_by > 0)
600 				ci->i_fragtree_nsplits--;
601 			kfree(frag);
602 		}
603 	}
604 out_unlock:
605 	mutex_unlock(&ci->i_fragtree_mutex);
606 	return 0;
607 }
608 
609 /*
610  * initialize a newly allocated inode.
611  */
612 struct inode *ceph_alloc_inode(struct super_block *sb)
613 {
614 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
615 	struct ceph_inode_info *ci;
616 	int i;
617 
618 	ci = alloc_inode_sb(sb, ceph_inode_cachep, GFP_NOFS);
619 	if (!ci)
620 		return NULL;
621 
622 	doutc(fsc->client, "%p\n", &ci->netfs.inode);
623 
624 	/* Set parameters for the netfs library */
625 	netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
626 
627 	spin_lock_init(&ci->i_ceph_lock);
628 
629 	ci->i_version = 0;
630 	ci->i_inline_version = 0;
631 	ci->i_time_warp_seq = 0;
632 	ci->i_ceph_flags = 0;
633 	atomic64_set(&ci->i_ordered_count, 1);
634 	atomic64_set(&ci->i_release_count, 1);
635 	atomic64_set(&ci->i_complete_seq[0], 0);
636 	atomic64_set(&ci->i_complete_seq[1], 0);
637 	ci->i_symlink = NULL;
638 
639 	ci->i_max_bytes = 0;
640 	ci->i_max_files = 0;
641 
642 	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
643 	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
644 	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
645 
646 	ci->i_fragtree = RB_ROOT;
647 	mutex_init(&ci->i_fragtree_mutex);
648 
649 	ci->i_xattrs.blob = NULL;
650 	ci->i_xattrs.prealloc_blob = NULL;
651 	ci->i_xattrs.dirty = false;
652 	ci->i_xattrs.index = RB_ROOT;
653 	ci->i_xattrs.count = 0;
654 	ci->i_xattrs.names_size = 0;
655 	ci->i_xattrs.vals_size = 0;
656 	ci->i_xattrs.version = 0;
657 	ci->i_xattrs.index_version = 0;
658 
659 	ci->i_caps = RB_ROOT;
660 	ci->i_auth_cap = NULL;
661 	ci->i_dirty_caps = 0;
662 	ci->i_flushing_caps = 0;
663 	INIT_LIST_HEAD(&ci->i_dirty_item);
664 	INIT_LIST_HEAD(&ci->i_flushing_item);
665 	ci->i_prealloc_cap_flush = NULL;
666 	INIT_LIST_HEAD(&ci->i_cap_flush_list);
667 	init_waitqueue_head(&ci->i_cap_wq);
668 	ci->i_hold_caps_max = 0;
669 	INIT_LIST_HEAD(&ci->i_cap_delay_list);
670 	INIT_LIST_HEAD(&ci->i_cap_snaps);
671 	ci->i_head_snapc = NULL;
672 	ci->i_snap_caps = 0;
673 
674 	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
675 	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
676 		ci->i_nr_by_mode[i] = 0;
677 
678 	mutex_init(&ci->i_truncate_mutex);
679 	ci->i_truncate_seq = 0;
680 	ci->i_truncate_size = 0;
681 	ci->i_truncate_pending = 0;
682 	ci->i_truncate_pagecache_size = 0;
683 
684 	ci->i_max_size = 0;
685 	ci->i_reported_size = 0;
686 	ci->i_wanted_max_size = 0;
687 	ci->i_requested_max_size = 0;
688 
689 	ci->i_pin_ref = 0;
690 	ci->i_rd_ref = 0;
691 	ci->i_rdcache_ref = 0;
692 	ci->i_wr_ref = 0;
693 	ci->i_wb_ref = 0;
694 	ci->i_fx_ref = 0;
695 	ci->i_wrbuffer_ref = 0;
696 	ci->i_wrbuffer_ref_head = 0;
697 	atomic_set(&ci->i_filelock_ref, 0);
698 	atomic_set(&ci->i_shared_gen, 1);
699 	ci->i_rdcache_gen = 0;
700 	ci->i_rdcache_revoking = 0;
701 
702 	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
703 	INIT_LIST_HEAD(&ci->i_unsafe_iops);
704 	spin_lock_init(&ci->i_unsafe_lock);
705 
706 	ci->i_snap_realm = NULL;
707 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
708 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
709 
710 	INIT_WORK(&ci->i_work, ceph_inode_work);
711 	ci->i_work_mask = 0;
712 	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
713 #ifdef CONFIG_FS_ENCRYPTION
714 	ci->i_crypt_info = NULL;
715 	ci->fscrypt_auth = NULL;
716 	ci->fscrypt_auth_len = 0;
717 #endif
718 	return &ci->netfs.inode;
719 }
720 
721 void ceph_free_inode(struct inode *inode)
722 {
723 	struct ceph_inode_info *ci = ceph_inode(inode);
724 
725 	kfree(ci->i_symlink);
726 #ifdef CONFIG_FS_ENCRYPTION
727 	kfree(ci->fscrypt_auth);
728 #endif
729 	fscrypt_free_inode(inode);
730 	kmem_cache_free(ceph_inode_cachep, ci);
731 }
732 
733 void ceph_evict_inode(struct inode *inode)
734 {
735 	struct ceph_inode_info *ci = ceph_inode(inode);
736 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
737 	struct ceph_client *cl = ceph_inode_to_client(inode);
738 	struct ceph_inode_frag *frag;
739 	struct rb_node *n;
740 
741 	doutc(cl, "%p ino %llx.%llx\n", inode, ceph_vinop(inode));
742 
743 	percpu_counter_dec(&mdsc->metric.total_inodes);
744 
745 	netfs_wait_for_outstanding_io(inode);
746 	truncate_inode_pages_final(&inode->i_data);
747 	if (inode->i_state & I_PINNING_NETFS_WB)
748 		ceph_fscache_unuse_cookie(inode, true);
749 	clear_inode(inode);
750 
751 	ceph_fscache_unregister_inode_cookie(ci);
752 	fscrypt_put_encryption_info(inode);
753 
754 	__ceph_remove_caps(ci);
755 
756 	if (__ceph_has_quota(ci, QUOTA_GET_ANY))
757 		ceph_adjust_quota_realms_count(inode, false);
758 
759 	/*
760 	 * we may still have a snap_realm reference if there are stray
761 	 * caps in i_snap_caps.
762 	 */
763 	if (ci->i_snap_realm) {
764 		if (ceph_snap(inode) == CEPH_NOSNAP) {
765 			doutc(cl, " dropping residual ref to snap realm %p\n",
766 			      ci->i_snap_realm);
767 			ceph_change_snap_realm(inode, NULL);
768 		} else {
769 			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
770 			ci->i_snap_realm = NULL;
771 		}
772 	}
773 
774 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
775 		frag = rb_entry(n, struct ceph_inode_frag, node);
776 		rb_erase(n, &ci->i_fragtree);
777 		kfree(frag);
778 	}
779 	ci->i_fragtree_nsplits = 0;
780 
781 	__ceph_destroy_xattrs(ci);
782 	if (ci->i_xattrs.blob)
783 		ceph_buffer_put(ci->i_xattrs.blob);
784 	if (ci->i_xattrs.prealloc_blob)
785 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
786 
787 	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
788 	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
789 }
790 
791 static inline blkcnt_t calc_inode_blocks(u64 size)
792 {
793 	return (size + (1<<9) - 1) >> 9;
794 }
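/*
 * calc_inode_blocks() above rounds the byte size up to 512-byte
 * sectors: e.g. a 1-byte file reports 1 block, a 513-byte file 2.
 */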
795 
796 /*
797  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
798  * careful because either the client or MDS may have more up to date
799  * info, depending on which capabilities are held, and whether
800  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
801  * and size are monotonically increasing, except when utimes() or
802  * truncate() increments the corresponding _seq values.)
803  */
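/*
 * Example of the _seq rules (a sketch): if the MDS reports
 * truncate_seq 5 while we hold 4, a truncate() happened elsewhere, so
 * a smaller size must be accepted and the pagecache truncated; if the
 * seqs match, only a larger size is taken, since size otherwise grows
 * monotonically.
 */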
804 int ceph_fill_file_size(struct inode *inode, int issued,
805 			u32 truncate_seq, u64 truncate_size, u64 size)
806 {
807 	struct ceph_client *cl = ceph_inode_to_client(inode);
808 	struct ceph_inode_info *ci = ceph_inode(inode);
809 	int queue_trunc = 0;
810 	loff_t isize = i_size_read(inode);
811 
812 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
813 	    (truncate_seq == ci->i_truncate_seq && size > isize)) {
814 		doutc(cl, "size %lld -> %llu\n", isize, size);
815 		if (size > 0 && S_ISDIR(inode->i_mode)) {
816 			pr_err_client(cl, "non-zero size for directory\n");
817 			size = 0;
818 		}
819 		i_size_write(inode, size);
820 		inode->i_blocks = calc_inode_blocks(size);
821 		/*
822 		 * If we're expanding, then we should be able to just update
823 		 * the existing cookie.
824 		 */
825 		if (size > isize)
826 			ceph_fscache_update(inode);
827 		ci->i_reported_size = size;
828 		if (truncate_seq != ci->i_truncate_seq) {
829 			doutc(cl, "truncate_seq %u -> %u\n",
830 			      ci->i_truncate_seq, truncate_seq);
831 			ci->i_truncate_seq = truncate_seq;
832 
833 			/* the MDS should have revoked these caps */
834 			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
835 					       CEPH_CAP_FILE_LAZYIO));
836 			/*
837 			 * If we hold relevant caps, or in the case where we're
838 			 * not the only client referencing this file and we
839 			 * don't hold those caps, then we need to check whether
840 			 * the file is either opened or mmaped
841 			 */
842 			if ((issued & (CEPH_CAP_FILE_CACHE|
843 				       CEPH_CAP_FILE_BUFFER)) ||
844 			    mapping_mapped(inode->i_mapping) ||
845 			    __ceph_is_file_opened(ci)) {
846 				ci->i_truncate_pending++;
847 				queue_trunc = 1;
848 			}
849 		}
850 	}
851 
852 	/*
853 	 * It's possible that the new sizes of two consecutive
854 	 * truncations land in the same fscrypt last block, in which
855 	 * case we still need to truncate the corresponding page cache
856 	 * anyway.
857 	 */
858 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0) {
859 		doutc(cl, "truncate_size %lld -> %llu, encrypted %d\n",
860 		      ci->i_truncate_size, truncate_size,
861 		      !!IS_ENCRYPTED(inode));
862 
863 		ci->i_truncate_size = truncate_size;
864 
865 		if (IS_ENCRYPTED(inode)) {
866 			doutc(cl, "truncate_pagecache_size %lld -> %llu\n",
867 			      ci->i_truncate_pagecache_size, size);
868 			ci->i_truncate_pagecache_size = size;
869 		} else {
870 			ci->i_truncate_pagecache_size = truncate_size;
871 		}
872 	}
873 	return queue_trunc;
874 }
875 
876 void ceph_fill_file_time(struct inode *inode, int issued,
877 			 u64 time_warp_seq, struct timespec64 *ctime,
878 			 struct timespec64 *mtime, struct timespec64 *atime)
879 {
880 	struct ceph_client *cl = ceph_inode_to_client(inode);
881 	struct ceph_inode_info *ci = ceph_inode(inode);
882 	struct timespec64 ictime = inode_get_ctime(inode);
883 	int warn = 0;
884 
885 	if (issued & (CEPH_CAP_FILE_EXCL|
886 		      CEPH_CAP_FILE_WR|
887 		      CEPH_CAP_FILE_BUFFER|
888 		      CEPH_CAP_AUTH_EXCL|
889 		      CEPH_CAP_XATTR_EXCL)) {
890 		if (ci->i_version == 0 ||
891 		    timespec64_compare(ctime, &ictime) > 0) {
892 			doutc(cl, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
893 			     ictime.tv_sec, ictime.tv_nsec,
894 			     ctime->tv_sec, ctime->tv_nsec);
895 			inode_set_ctime_to_ts(inode, *ctime);
896 		}
897 		if (ci->i_version == 0 ||
898 		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
899 			/* the MDS did a utimes() */
900 			doutc(cl, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n",
901 			     inode_get_mtime_sec(inode),
902 			     inode_get_mtime_nsec(inode),
903 			     mtime->tv_sec, mtime->tv_nsec,
904 			     ci->i_time_warp_seq, (int)time_warp_seq);
905 
906 			inode_set_mtime_to_ts(inode, *mtime);
907 			inode_set_atime_to_ts(inode, *atime);
908 			ci->i_time_warp_seq = time_warp_seq;
909 		} else if (time_warp_seq == ci->i_time_warp_seq) {
910 			struct timespec64	ts;
911 
912 			/* nobody did utimes(); take the max */
913 			ts = inode_get_mtime(inode);
914 			if (timespec64_compare(mtime, &ts) > 0) {
915 				doutc(cl, "mtime %lld.%09ld -> %lld.%09ld inc\n",
916 				     ts.tv_sec, ts.tv_nsec,
917 				     mtime->tv_sec, mtime->tv_nsec);
918 				inode_set_mtime_to_ts(inode, *mtime);
919 			}
920 			ts = inode_get_atime(inode);
921 			if (timespec64_compare(atime, &ts) > 0) {
922 				doutc(cl, "atime %lld.%09ld -> %lld.%09ld inc\n",
923 				     ts.tv_sec, ts.tv_nsec,
924 				     atime->tv_sec, atime->tv_nsec);
925 				inode_set_atime_to_ts(inode, *atime);
926 			}
927 		} else if (issued & CEPH_CAP_FILE_EXCL) {
928 			/* we did a utimes(); ignore mds values */
929 		} else {
930 			warn = 1;
931 		}
932 	} else {
933 		/* we have no write|excl caps; whatever the MDS says is true */
934 		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
935 			inode_set_ctime_to_ts(inode, *ctime);
936 			inode_set_mtime_to_ts(inode, *mtime);
937 			inode_set_atime_to_ts(inode, *atime);
938 			ci->i_time_warp_seq = time_warp_seq;
939 		} else {
940 			warn = 1;
941 		}
942 	}
943 	if (warn) /* time_warp_seq shouldn't go backwards */
944 		doutc(cl, "%p mds time_warp_seq %llu < %u\n", inode,
945 		      time_warp_seq, ci->i_time_warp_seq);
946 }
947 
948 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
949 static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
950 				    const char *encsym,
951 				    int enclen, u8 **decsym)
952 {
953 	struct ceph_client *cl = mdsc->fsc->client;
954 	int declen;
955 	u8 *sym;
956 
957 	sym = kmalloc(enclen + 1, GFP_NOFS);
958 	if (!sym)
959 		return -ENOMEM;
960 
961 	declen = ceph_base64_decode(encsym, enclen, sym);
962 	if (declen < 0) {
963 		pr_err_client(cl,
964 			"can't decode symlink (%d). Content: %.*s\n",
965 			declen, enclen, encsym);
966 		kfree(sym);
967 		return -EIO;
968 	}
969 	sym[declen] = '\0';	/* NUL-terminate the decoded target */
970 	*decsym = sym;
971 	return declen;
972 }
973 #else
974 static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
975 				    const char *encsym,
976 				    int symlen, u8 **decsym)
977 {
978 	return -EOPNOTSUPP;
979 }
980 #endif
981 
982 /*
983  * Populate an inode based on info from mds.  May be called on new or
984  * existing inodes.
985  */
986 int ceph_fill_inode(struct inode *inode, struct page *locked_page,
987 		    struct ceph_mds_reply_info_in *iinfo,
988 		    struct ceph_mds_reply_dirfrag *dirinfo,
989 		    struct ceph_mds_session *session, int cap_fmode,
990 		    struct ceph_cap_reservation *caps_reservation)
991 {
992 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
993 	struct ceph_client *cl = mdsc->fsc->client;
994 	struct ceph_mds_reply_inode *info = iinfo->in;
995 	struct ceph_inode_info *ci = ceph_inode(inode);
996 	int issued, new_issued, info_caps;
997 	struct timespec64 mtime, atime, ctime;
998 	struct ceph_buffer *xattr_blob = NULL;
999 	struct ceph_buffer *old_blob = NULL;
1000 	struct ceph_string *pool_ns = NULL;
1001 	struct ceph_cap *new_cap = NULL;
1002 	int err = 0;
1003 	bool wake = false;
1004 	bool queue_trunc = false;
1005 	bool new_version = false;
1006 	bool fill_inline = false;
1007 	umode_t mode = le32_to_cpu(info->mode);
1008 	dev_t rdev = le32_to_cpu(info->rdev);
1009 
1010 	lockdep_assert_held(&mdsc->snap_rwsem);
1011 
1012 	doutc(cl, "%p ino %llx.%llx v %llu had %llu\n", inode, ceph_vinop(inode),
1013 	      le64_to_cpu(info->version), ci->i_version);
1014 
1015 	/* Once I_NEW is cleared, we can't change type or dev numbers */
1016 	if (inode->i_state & I_NEW) {
1017 		inode->i_mode = mode;
1018 	} else {
1019 		if (inode_wrong_type(inode, mode)) {
1020 			pr_warn_once_client(cl,
1021 				"inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
1022 				ceph_vinop(inode), inode->i_mode, mode);
1023 			return -ESTALE;
1024 		}
1025 
1026 		if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
1027 			pr_warn_once_client(cl,
1028 				"dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
1029 				ceph_vinop(inode), MAJOR(inode->i_rdev),
1030 				MINOR(inode->i_rdev), MAJOR(rdev),
1031 				MINOR(rdev));
1032 			return -ESTALE;
1033 		}
1034 	}
1035 
1036 	info_caps = le32_to_cpu(info->cap.caps);
1037 
1038 	/* prealloc new cap struct */
1039 	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
1040 		new_cap = ceph_get_cap(mdsc, caps_reservation);
1041 		if (!new_cap)
1042 			return -ENOMEM;
1043 	}
1044 
1045 	/*
1046 	 * prealloc xattr data, if it looks like we'll need it.  only
1047 	 * if len > 4 (meaning there are actually xattrs; the first 4
1048 	 * bytes are the xattr count).
1049 	 */
1050 	if (iinfo->xattr_len > 4) {
1051 		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
1052 		if (!xattr_blob)
1053 			pr_err_client(cl, "ENOMEM xattr blob %d bytes\n",
1054 				      iinfo->xattr_len);
1055 	}
1056 
1057 	if (iinfo->pool_ns_len > 0)
1058 		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
1059 						     iinfo->pool_ns_len);
1060 
1061 	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
1062 		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
1063 
1064 	spin_lock(&ci->i_ceph_lock);
1065 
1066 	/*
1067 	 * The provided version will be odd if the inode value is
1068 	 * projected (unstable) and even if it is stable.  Skip the
1069 	 * update if we have newer stable info (ours >= theirs, e.g.
1070 	 * due to racing mds replies), unless we are getting projected
1071 	 * (unstable) info (in which case we want ours > theirs).
1072 	 *   us   them
1073 	 *   2    2     skip
1074 	 *   3    2     skip
1075 	 *   3    3     update
1076 	 */
1077 	if (ci->i_version == 0 ||
1078 	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
1079 	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
1080 		new_version = true;
1081 
1082 	/* Update change_attribute */
1083 	inode_set_max_iversion_raw(inode, iinfo->change_attr);
1084 
1085 	__ceph_caps_issued(ci, &issued);
1086 	issued |= __ceph_caps_dirty(ci);
1087 	new_issued = ~issued & info_caps;
1088 
1089 	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
1090 
1091 #ifdef CONFIG_FS_ENCRYPTION
1092 	if (iinfo->fscrypt_auth_len &&
1093 	    ((inode->i_state & I_NEW) || (ci->fscrypt_auth_len == 0))) {
1094 		kfree(ci->fscrypt_auth);
1095 		ci->fscrypt_auth_len = iinfo->fscrypt_auth_len;
1096 		ci->fscrypt_auth = iinfo->fscrypt_auth;
1097 		iinfo->fscrypt_auth = NULL;
1098 		iinfo->fscrypt_auth_len = 0;
1099 		inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
1100 	}
1101 #endif
1102 
1103 	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
1104 	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
1105 		inode->i_mode = mode;
1106 		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
1107 		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
1108 		doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode,
1109 		      ceph_vinop(inode), inode->i_mode,
1110 		      from_kuid(&init_user_ns, inode->i_uid),
1111 		      from_kgid(&init_user_ns, inode->i_gid));
1112 		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
1113 		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
1114 	}
1115 
1116 	/* directories have fl_stripe_unit set to zero */
1117 	if (IS_ENCRYPTED(inode))
1118 		inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
1119 	else if (le32_to_cpu(info->layout.fl_stripe_unit))
1120 		inode->i_blkbits =
1121 			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
1122 	else
1123 		inode->i_blkbits = CEPH_BLOCK_SHIFT;
1124 
1125 	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
1126 	    (issued & CEPH_CAP_LINK_EXCL) == 0)
1127 		set_nlink(inode, le32_to_cpu(info->nlink));
1128 
1129 	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
1130 		/* be careful with mtime, atime, size */
1131 		ceph_decode_timespec64(&atime, &info->atime);
1132 		ceph_decode_timespec64(&mtime, &info->mtime);
1133 		ceph_decode_timespec64(&ctime, &info->ctime);
1134 		ceph_fill_file_time(inode, issued,
1135 				le32_to_cpu(info->time_warp_seq),
1136 				&ctime, &mtime, &atime);
1137 	}
1138 
1139 	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
1140 		ci->i_files = le64_to_cpu(info->files);
1141 		ci->i_subdirs = le64_to_cpu(info->subdirs);
1142 	}
1143 
1144 	if (new_version ||
1145 	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
1146 		u64 size = le64_to_cpu(info->size);
1147 		s64 old_pool = ci->i_layout.pool_id;
1148 		struct ceph_string *old_ns;
1149 
1150 		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
1151 		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
1152 					lockdep_is_held(&ci->i_ceph_lock));
1153 		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
1154 
1155 		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
1156 			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
1157 
1158 		pool_ns = old_ns;
1159 
1160 		if (IS_ENCRYPTED(inode) && size &&
1161 		    iinfo->fscrypt_file_len == sizeof(__le64)) {
1162 			u64 fsize = __le64_to_cpu(*(__le64 *)iinfo->fscrypt_file);
1163 
1164 			if (size == round_up(fsize, CEPH_FSCRYPT_BLOCK_SIZE)) {
1165 				size = fsize;
1166 			} else {
1167 				pr_warn_client(cl,
1168 					"fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
1169 					size, fsize);
1170 			}
1171 		}
1172 
1173 		queue_trunc = ceph_fill_file_size(inode, issued,
1174 					le32_to_cpu(info->truncate_seq),
1175 					le64_to_cpu(info->truncate_size),
1176 					size);
1177 		/* only update max_size on auth cap */
1178 		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
1179 		    ci->i_max_size != le64_to_cpu(info->max_size)) {
1180 			doutc(cl, "max_size %lld -> %llu\n",
1181 			    ci->i_max_size, le64_to_cpu(info->max_size));
1182 			ci->i_max_size = le64_to_cpu(info->max_size);
1183 		}
1184 	}
1185 
1186 	/* layout and rstat are not tracked by capability, update them if
1187 	 * the inode info is from auth mds */
1188 	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
1189 		if (S_ISDIR(inode->i_mode)) {
1190 			ci->i_dir_layout = iinfo->dir_layout;
1191 			ci->i_rbytes = le64_to_cpu(info->rbytes);
1192 			ci->i_rfiles = le64_to_cpu(info->rfiles);
1193 			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
1194 			ci->i_dir_pin = iinfo->dir_pin;
1195 			ci->i_rsnaps = iinfo->rsnaps;
1196 			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
1197 		}
1198 	}
1199 
1200 	/* xattrs */
1201 	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
1202 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
1203 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
1204 		if (ci->i_xattrs.blob)
1205 			old_blob = ci->i_xattrs.blob;
1206 		ci->i_xattrs.blob = xattr_blob;
1207 		if (xattr_blob)
1208 			memcpy(ci->i_xattrs.blob->vec.iov_base,
1209 			       iinfo->xattr_data, iinfo->xattr_len);
1210 		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
1211 		ceph_forget_all_cached_acls(inode);
1212 		ceph_security_invalidate_secctx(inode);
1213 		xattr_blob = NULL;
1214 	}
1215 
1216 	/* finally update i_version */
1217 	if (le64_to_cpu(info->version) > ci->i_version)
1218 		ci->i_version = le64_to_cpu(info->version);
1219 
1220 	inode->i_mapping->a_ops = &ceph_aops;
1221 
1222 	switch (inode->i_mode & S_IFMT) {
1223 	case S_IFIFO:
1224 	case S_IFBLK:
1225 	case S_IFCHR:
1226 	case S_IFSOCK:
1227 		inode->i_blkbits = PAGE_SHIFT;
1228 		init_special_inode(inode, inode->i_mode, rdev);
1229 		inode->i_op = &ceph_file_iops;
1230 		break;
1231 	case S_IFREG:
1232 		inode->i_op = &ceph_file_iops;
1233 		inode->i_fop = &ceph_file_fops;
1234 		break;
1235 	case S_IFLNK:
1236 		if (!ci->i_symlink) {
1237 			u32 symlen = iinfo->symlink_len;
1238 			char *sym;
1239 
1240 			spin_unlock(&ci->i_ceph_lock);
1241 
1242 			if (IS_ENCRYPTED(inode)) {
1243 				if (symlen != i_size_read(inode))
1244 					pr_err_client(cl,
1245 						"%p %llx.%llx BAD symlink size %lld\n",
1246 						inode, ceph_vinop(inode),
1247 						i_size_read(inode));
1248 
1249 				err = decode_encrypted_symlink(mdsc, iinfo->symlink,
1250 							       symlen, (u8 **)&sym);
1251 				if (err < 0) {
1252 					pr_err_client(cl,
1253 						"decoding encrypted symlink failed: %d\n",
1254 						err);
1255 					goto out;
1256 				}
1257 				symlen = err;
1258 				i_size_write(inode, symlen);
1259 				inode->i_blocks = calc_inode_blocks(symlen);
1260 			} else {
1261 				if (symlen != i_size_read(inode)) {
1262 					pr_err_client(cl,
1263 						"%p %llx.%llx BAD symlink size %lld\n",
1264 						inode, ceph_vinop(inode),
1265 						i_size_read(inode));
1266 					i_size_write(inode, symlen);
1267 					inode->i_blocks = calc_inode_blocks(symlen);
1268 				}
1269 
1270 				err = -ENOMEM;
1271 				sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
1272 				if (!sym)
1273 					goto out;
1274 			}
1275 
1276 			spin_lock(&ci->i_ceph_lock);
1277 			if (!ci->i_symlink)
1278 				ci->i_symlink = sym;
1279 			else
1280 				kfree(sym); /* lost a race */
1281 		}
1282 
1283 		if (IS_ENCRYPTED(inode)) {
1284 			/*
1285 			 * Encrypted symlinks need to be decrypted before we can
1286 			 * cache their targets in i_link. Don't touch it here.
1287 			 */
1288 			inode->i_op = &ceph_encrypted_symlink_iops;
1289 		} else {
1290 			inode->i_link = ci->i_symlink;
1291 			inode->i_op = &ceph_symlink_iops;
1292 		}
1293 		break;
1294 	case S_IFDIR:
1295 		inode->i_op = &ceph_dir_iops;
1296 		inode->i_fop = &ceph_dir_fops;
1297 		break;
1298 	default:
1299 		pr_err_client(cl, "%p %llx.%llx BAD mode 0%o\n", inode,
1300 			      ceph_vinop(inode), inode->i_mode);
1301 	}
1302 
1303 	/* were we issued a capability? */
1304 	if (info_caps) {
1305 		if (ceph_snap(inode) == CEPH_NOSNAP) {
1306 			ceph_add_cap(inode, session,
1307 				     le64_to_cpu(info->cap.cap_id),
1308 				     info_caps,
1309 				     le32_to_cpu(info->cap.wanted),
1310 				     le32_to_cpu(info->cap.seq),
1311 				     le32_to_cpu(info->cap.mseq),
1312 				     le64_to_cpu(info->cap.realm),
1313 				     info->cap.flags, &new_cap);
1314 
1315 			/* set dir completion flag? */
1316 			if (S_ISDIR(inode->i_mode) &&
1317 			    ci->i_files == 0 && ci->i_subdirs == 0 &&
1318 			    (info_caps & CEPH_CAP_FILE_SHARED) &&
1319 			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
1320 			    !__ceph_dir_is_complete(ci)) {
1321 				doutc(cl, " marking %p complete (empty)\n",
1322 				      inode);
1323 				i_size_write(inode, 0);
1324 				__ceph_dir_set_complete(ci,
1325 					atomic64_read(&ci->i_release_count),
1326 					atomic64_read(&ci->i_ordered_count));
1327 			}
1328 
1329 			wake = true;
1330 		} else {
1331 			doutc(cl, " %p got snap_caps %s\n", inode,
1332 			      ceph_cap_string(info_caps));
1333 			ci->i_snap_caps |= info_caps;
1334 		}
1335 	}
1336 
1337 	if (iinfo->inline_version > 0 &&
1338 	    iinfo->inline_version >= ci->i_inline_version) {
1339 		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1340 		ci->i_inline_version = iinfo->inline_version;
1341 		if (ceph_has_inline_data(ci) &&
1342 		    (locked_page || (info_caps & cache_caps)))
1343 			fill_inline = true;
1344 	}
1345 
1346 	if (cap_fmode >= 0) {
1347 		if (!info_caps)
1348 			pr_warn_client(cl, "mds issued no caps on %llx.%llx\n",
1349 				       ceph_vinop(inode));
1350 		__ceph_touch_fmode(ci, mdsc, cap_fmode);
1351 	}
1352 
1353 	spin_unlock(&ci->i_ceph_lock);
1354 
1355 	ceph_fscache_register_inode_cookie(inode);
1356 
1357 	if (fill_inline)
1358 		ceph_fill_inline_data(inode, locked_page,
1359 				      iinfo->inline_data, iinfo->inline_len);
1360 
1361 	if (wake)
1362 		wake_up_all(&ci->i_cap_wq);
1363 
1364 	/* queue truncate if we saw i_size decrease */
1365 	if (queue_trunc)
1366 		ceph_queue_vmtruncate(inode);
1367 
1368 	/* populate frag tree */
1369 	if (S_ISDIR(inode->i_mode))
1370 		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
1371 
1372 	/* update delegation info? */
1373 	if (dirinfo)
1374 		ceph_fill_dirfrag(inode, dirinfo);
1375 
1376 	err = 0;
1377 out:
1378 	if (new_cap)
1379 		ceph_put_cap(mdsc, new_cap);
1380 	ceph_buffer_put(old_blob);
1381 	ceph_buffer_put(xattr_blob);
1382 	ceph_put_string(pool_ns);
1383 	return err;
1384 }
1385 
1386 /*
1387  * caller should hold session s_mutex and dentry->d_lock.
1388  */
1389 static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
1390 				  struct ceph_mds_reply_lease *lease,
1391 				  struct ceph_mds_session *session,
1392 				  unsigned long from_time,
1393 				  struct ceph_mds_session **old_lease_session)
1394 {
1395 	struct ceph_client *cl = ceph_inode_to_client(dir);
1396 	struct ceph_dentry_info *di = ceph_dentry(dentry);
1397 	unsigned mask = le16_to_cpu(lease->mask);
1398 	long unsigned duration = le32_to_cpu(lease->duration_ms);
1399 	long unsigned ttl = from_time + (duration * HZ) / 1000;
1400 	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
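	/*
	 * duration_ms is converted to jiffies; e.g. with HZ=250, a
	 * 30000 ms lease gives ttl = from_time + 7500 jiffies, with the
	 * renewal point (half_ttl) at the half-way mark.
	 */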
1401 
1402 	doutc(cl, "%p duration %lu ms ttl %lu\n", dentry, duration, ttl);
1403 
1404 	/* only track leases on regular dentries */
1405 	if (ceph_snap(dir) != CEPH_NOSNAP)
1406 		return;
1407 
1408 	if (mask & CEPH_LEASE_PRIMARY_LINK)
1409 		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
1410 	else
1411 		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1412 
1413 	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
1414 	if (!(mask & CEPH_LEASE_VALID)) {
1415 		__ceph_dentry_dir_lease_touch(di);
1416 		return;
1417 	}
1418 
1419 	if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
1420 	    time_before(ttl, di->time))
1421 		return;  /* we already have a newer lease. */
1422 
1423 	if (di->lease_session && di->lease_session != session) {
1424 		*old_lease_session = di->lease_session;
1425 		di->lease_session = NULL;
1426 	}
1427 
1428 	if (!di->lease_session)
1429 		di->lease_session = ceph_get_mds_session(session);
1430 	di->lease_gen = atomic_read(&session->s_cap_gen);
1431 	di->lease_seq = le32_to_cpu(lease->seq);
1432 	di->lease_renew_after = half_ttl;
1433 	di->lease_renew_from = 0;
1434 	di->time = ttl;
1435 
1436 	__ceph_dentry_lease_touch(di);
1437 }
1438 
1439 static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
1440 					struct ceph_mds_reply_lease *lease,
1441 					struct ceph_mds_session *session,
1442 					unsigned long from_time)
1443 {
1444 	struct ceph_mds_session *old_lease_session = NULL;
1445 	spin_lock(&dentry->d_lock);
1446 	__update_dentry_lease(dir, dentry, lease, session, from_time,
1447 			      &old_lease_session);
1448 	spin_unlock(&dentry->d_lock);
1449 	ceph_put_mds_session(old_lease_session);
1450 }
1451 
1452 /*
1453  * update dentry lease without having parent inode locked
1454  */
1455 static void update_dentry_lease_careful(struct dentry *dentry,
1456 					struct ceph_mds_reply_lease *lease,
1457 					struct ceph_mds_session *session,
1458 					unsigned long from_time,
1459 					char *dname, u32 dname_len,
1460 					struct ceph_vino *pdvino,
1461 					struct ceph_vino *ptvino)
1462 
1463 {
1464 	struct inode *dir;
1465 	struct ceph_mds_session *old_lease_session = NULL;
1466 
1467 	spin_lock(&dentry->d_lock);
1468 	/* make sure dentry's name matches target */
1469 	if (dentry->d_name.len != dname_len ||
1470 	    memcmp(dentry->d_name.name, dname, dname_len))
1471 		goto out_unlock;
1472 
1473 	dir = d_inode(dentry->d_parent);
1474 	/* make sure parent matches dvino */
1475 	if (!ceph_ino_compare(dir, pdvino))
1476 		goto out_unlock;
1477 
1478 	/* make sure dentry's inode matches target. NULL ptvino means that
1479 	 * we expect a negative dentry */
1480 	if (ptvino) {
1481 		if (d_really_is_negative(dentry))
1482 			goto out_unlock;
1483 		if (!ceph_ino_compare(d_inode(dentry), ptvino))
1484 			goto out_unlock;
1485 	} else {
1486 		if (d_really_is_positive(dentry))
1487 			goto out_unlock;
1488 	}
1489 
1490 	__update_dentry_lease(dir, dentry, lease, session,
1491 			      from_time, &old_lease_session);
1492 out_unlock:
1493 	spin_unlock(&dentry->d_lock);
1494 	ceph_put_mds_session(old_lease_session);
1495 }
1496 
1497 /*
1498  * splice a dentry to an inode.
1499  * caller must hold directory i_rwsem for this to be safe.
1500  */
1501 static int splice_dentry(struct dentry **pdn, struct inode *in)
1502 {
1503 	struct ceph_client *cl = ceph_inode_to_client(in);
1504 	struct dentry *dn = *pdn;
1505 	struct dentry *realdn;
1506 
1507 	BUG_ON(d_inode(dn));
1508 
1509 	if (S_ISDIR(in->i_mode)) {
1510 		/* If inode is directory, d_splice_alias() below will remove
1511 		 * 'realdn' from its origin parent. We need to ensure that
1512 		 * origin parent's readdir cache will not reference 'realdn'
1513 		 */
1514 		realdn = d_find_any_alias(in);
1515 		if (realdn) {
1516 			struct ceph_dentry_info *di = ceph_dentry(realdn);
1517 			spin_lock(&realdn->d_lock);
1518 
1519 			realdn->d_op->d_prune(realdn);
1520 
1521 			di->time = jiffies;
1522 			di->lease_shared_gen = 0;
1523 			di->offset = 0;
1524 
1525 			spin_unlock(&realdn->d_lock);
1526 			dput(realdn);
1527 		}
1528 	}
1529 
1530 	/* dn must be unhashed */
1531 	if (!d_unhashed(dn))
1532 		d_drop(dn);
1533 	realdn = d_splice_alias(in, dn);
1534 	if (IS_ERR(realdn)) {
1535 		pr_err_client(cl, "error %ld %p inode %p ino %llx.%llx\n",
1536 			      PTR_ERR(realdn), dn, in, ceph_vinop(in));
1537 		return PTR_ERR(realdn);
1538 	}
1539 
1540 	if (realdn) {
1541 		doutc(cl, "dn %p (%d) spliced with %p (%d) inode %p ino %llx.%llx\n",
1542 		      dn, d_count(dn), realdn, d_count(realdn),
1543 		      d_inode(realdn), ceph_vinop(d_inode(realdn)));
1544 		dput(dn);
1545 		*pdn = realdn;
1546 	} else {
1547 		BUG_ON(!ceph_dentry(dn));
1548 		doutc(cl, "dn %p attached to %p ino %llx.%llx\n", dn,
1549 		      d_inode(dn), ceph_vinop(d_inode(dn)));
1550 	}
1551 	return 0;
1552 }
1553 
1554 /*
1555  * Incorporate results into the local cache.  This is either just
1556  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1557  * after a lookup).
1558  *
1559  * A reply may contain
1560  *         a directory inode along with a dentry.
1561  *  and/or a target inode
1562  *
1563  * Called with snap_rwsem (read).
1564  */
1565 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
1566 {
1567 	struct ceph_mds_session *session = req->r_session;
1568 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1569 	struct inode *in = NULL;
1570 	struct ceph_vino tvino, dvino;
1571 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
1572 	struct ceph_client *cl = fsc->client;
1573 	struct inode *parent_dir = NULL;
1574 	int err = 0;
1575 
1576 	doutc(cl, "%p is_dentry %d is_target %d\n", req,
1577 	      rinfo->head->is_dentry, rinfo->head->is_target);
1578 
1579 	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1580 		doutc(cl, "reply is empty!\n");
1581 		if (rinfo->head->result == 0 && req->r_parent)
1582 			ceph_invalidate_dir_request(req);
1583 		return 0;
1584 	}
1585 
1586 	if (rinfo->head->is_dentry) {
1587 		/*
1588 		 * r_parent may be stale in cases where R_PARENT_LOCKED is not set,
1589 		 * so we need to get the correct inode
1590 		 */
1591 		parent_dir = ceph_get_reply_dir(sb, req->r_parent, rinfo);
1592 		if (unlikely(IS_ERR(parent_dir))) {
1593 			err = PTR_ERR(parent_dir);
1594 			goto done;
1595 		}
1596 		if (parent_dir) {
1597 			err = ceph_fill_inode(parent_dir, NULL, &rinfo->diri,
1598 					      rinfo->dirfrag, session, -1,
1599 					      &req->r_caps_reservation);
1600 			if (err < 0)
1601 				goto done;
1602 		} else {
1603 			WARN_ON_ONCE(1);
1604 		}
1605 
1606 		if (parent_dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
1607 		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1608 		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1609 			bool is_nokey = false;
1610 			struct qstr dname;
1611 			struct dentry *dn, *parent;
1612 			struct fscrypt_str oname = FSTR_INIT(NULL, 0);
1613 			struct ceph_fname fname = { .dir	= parent_dir,
1614 						    .name	= rinfo->dname,
1615 						    .ctext	= rinfo->altname,
1616 						    .name_len	= rinfo->dname_len,
1617 						    .ctext_len	= rinfo->altname_len };
1618 
1619 			BUG_ON(!rinfo->head->is_target);
1620 			BUG_ON(req->r_dentry);
1621 
1622 			parent = d_find_any_alias(parent_dir);
1623 			BUG_ON(!parent);
1624 
1625 			err = ceph_fname_alloc_buffer(parent_dir, &oname);
1626 			if (err < 0) {
1627 				dput(parent);
1628 				goto done;
1629 			}
1630 
1631 			err = ceph_fname_to_usr(&fname, NULL, &oname, &is_nokey);
1632 			if (err < 0) {
1633 				dput(parent);
1634 				ceph_fname_free_buffer(parent_dir, &oname);
1635 				goto done;
1636 			}
1637 			dname.name = oname.name;
1638 			dname.len = oname.len;
1639 			dname.hash = full_name_hash(parent, dname.name, dname.len);
1640 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1641 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1642 retry_lookup:
1643 			dn = d_lookup(parent, &dname);
1644 			doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
1645 			      parent, dname.len, dname.name, dn);
1646 
1647 			if (!dn) {
1648 				dn = d_alloc(parent, &dname);
1649 				doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
1650 				      dname.len, dname.name, dn);
1651 				if (!dn) {
1652 					dput(parent);
1653 					ceph_fname_free_buffer(parent_dir, &oname);
1654 					err = -ENOMEM;
1655 					goto done;
1656 				}
1657 				if (is_nokey) {
1658 					spin_lock(&dn->d_lock);
1659 					dn->d_flags |= DCACHE_NOKEY_NAME;
1660 					spin_unlock(&dn->d_lock);
1661 				}
1662 				err = 0;
1663 			} else if (d_really_is_positive(dn) &&
1664 				   (ceph_ino(d_inode(dn)) != tvino.ino ||
1665 				    ceph_snap(d_inode(dn)) != tvino.snap)) {
1666 				doutc(cl, " dn %p points to wrong inode %p\n",
1667 				      dn, d_inode(dn));
1668 				ceph_dir_clear_ordered(parent_dir);
1669 				d_delete(dn);
1670 				dput(dn);
1671 				goto retry_lookup;
1672 			}
1673 			ceph_fname_free_buffer(parent_dir, &oname);
1674 
1675 			req->r_dentry = dn;
1676 			dput(parent);
1677 		}
1678 	}
1679 
1680 	if (rinfo->head->is_target) {
1681 		/* Should be filled in by handle_reply */
1682 		BUG_ON(!req->r_target_inode);
1683 
1684 		in = req->r_target_inode;
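		/*
		 * Only account an open fmode when the request completed
		 * successfully and was neither aborted nor async;
		 * otherwise pass -1 (no fmode).
		 */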
1685 		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
1686 				NULL, session,
1687 				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1688 				 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
1689 				 rinfo->head->result == 0) ?  req->r_fmode : -1,
1690 				&req->r_caps_reservation);
1691 		if (err < 0) {
1692 			pr_err_client(cl, "badness %p %llx.%llx\n", in,
1693 				      ceph_vinop(in));
1694 			req->r_target_inode = NULL;
1695 			if (in->i_state & I_NEW)
1696 				discard_new_inode(in);
1697 			else
1698 				iput(in);
1699 			goto done;
1700 		}
1701 		if (in->i_state & I_NEW)
1702 			unlock_new_inode(in);
1703 	}
1704 
1705 	/*
1706 	 * ignore null lease/binding on snapdir ENOENT, or else we
1707 	 * will have trouble splicing in the virtual snapdir later
1708 	 */
1709 	if (rinfo->head->is_dentry &&
1710             !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1711 	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1712 	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1713 					       fsc->mount_options->snapdir_name,
1714 					       req->r_dentry->d_name.len))) {
1715 		/*
1716 		 * lookup link rename   : null -> possibly existing inode
1717 		 * mknod symlink mkdir  : null -> new inode
1718 		 * unlink               : linked -> null
1719 		 */
1720 		struct inode *dir = req->r_parent;
1721 		struct dentry *dn = req->r_dentry;
1722 		bool have_dir_cap, have_lease;
1723 
1724 		BUG_ON(!dn);
1725 		BUG_ON(!dir);
1726 		BUG_ON(d_inode(dn->d_parent) != dir);
1727 
1728 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1729 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1730 
1731 		BUG_ON(ceph_ino(dir) != dvino.ino);
1732 		BUG_ON(ceph_snap(dir) != dvino.snap);
1733 
1734 		/* do we have a lease on the whole dir? */
1735 		have_dir_cap =
1736 			(le32_to_cpu(rinfo->diri.in->cap.caps) &
1737 			 CEPH_CAP_FILE_SHARED);
1738 
1739 		/* do we have a dn lease? */
1740 		have_lease = have_dir_cap ||
1741 			le32_to_cpu(rinfo->dlease->duration_ms);
1742 		if (!have_lease)
1743 			doutc(cl, "no dentry lease or dir cap\n");
1744 
1745 		/* rename? */
1746 		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1747 			struct inode *olddir = req->r_old_dentry_dir;
1748 			BUG_ON(!olddir);
1749 
1750 			doutc(cl, " src %p '%pd' dst %p '%pd'\n",
1751 			      req->r_old_dentry, req->r_old_dentry, dn, dn);
1752 			doutc(cl, "doing d_move %p -> %p\n", req->r_old_dentry, dn);
1753 
1754 			/* d_move screws up sibling dentries' offsets */
1755 			ceph_dir_clear_ordered(dir);
1756 			ceph_dir_clear_ordered(olddir);
1757 
1758 			d_move(req->r_old_dentry, dn);
1759 			doutc(cl, " src %p '%pd' dst %p '%pd'\n",
1760 			      req->r_old_dentry, req->r_old_dentry, dn, dn);
1761 
1762 			/* ensure target dentry is invalidated, despite
1763 			   rehashing bug in vfs_rename_dir */
1764 			ceph_invalidate_dentry_lease(dn);
1765 
1766 			doutc(cl, "dn %p gets new offset %lld\n",
1767 			      req->r_old_dentry,
1768 			      ceph_dentry(req->r_old_dentry)->offset);
1769 
1770 			/* swap r_dentry and r_old_dentry in case
1771 			 * splice_dentry() gets called later. This is safe
1772 			 * because nothing else uses them */
1773 			req->r_dentry = req->r_old_dentry;
1774 			req->r_old_dentry = dn;
1775 			dn = req->r_dentry;
1776 		}
1777 
1778 		/* null dentry? */
1779 		if (!rinfo->head->is_target) {
1780 			doutc(cl, "null dentry\n");
1781 			if (d_really_is_positive(dn)) {
1782 				doutc(cl, "d_delete %p\n", dn);
1783 				ceph_dir_clear_ordered(dir);
1784 				d_delete(dn);
1785 			} else if (have_lease) {
1786 				if (d_unhashed(dn))
1787 					d_add(dn, NULL);
1788 			}
1789 
1790 			if (!d_unhashed(dn) && have_lease)
1791 				update_dentry_lease(dir, dn,
1792 						    rinfo->dlease, session,
1793 						    req->r_request_started);
1794 			goto done;
1795 		}
1796 
1797 		if (unlikely(!in)) {
1798 			err = -EINVAL;
1799 			goto done;
1800 		}
1801 
1802 		/* attach proper inode */
1803 		if (d_really_is_negative(dn)) {
1804 			ceph_dir_clear_ordered(dir);
1805 			ihold(in);
1806 			err = splice_dentry(&req->r_dentry, in);
1807 			if (err < 0)
1808 				goto done;
1809 			dn = req->r_dentry;  /* may have spliced */
1810 		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1811 			doutc(cl, " %p links to %p %llx.%llx, not %llx.%llx\n",
1812 			      dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1813 			      ceph_vinop(in));
1814 			d_invalidate(dn);
1815 			have_lease = false;
1816 		}
1817 
1818 		if (have_lease) {
1819 			update_dentry_lease(dir, dn,
1820 					    rinfo->dlease, session,
1821 					    req->r_request_started);
1822 		}
1823 		doutc(cl, " final dn %p\n", dn);
1824 	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1825 		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
1826 	           test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1827 		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1828 		struct inode *dir = req->r_parent;
1829 
1830 		/* fill out a snapdir LOOKUPSNAP dentry */
1831 		BUG_ON(!dir);
1832 		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1833 		BUG_ON(!req->r_dentry);
1834 		doutc(cl, " linking snapped dir %p to dn %p\n", in,
1835 		      req->r_dentry);
1836 		ceph_dir_clear_ordered(dir);
1837 
1838 		if (unlikely(!in)) {
1839 			err = -EINVAL;
1840 			goto done;
1841 		}
1842 
1843 		ihold(in);
1844 		err = splice_dentry(&req->r_dentry, in);
1845 		if (err < 0)
1846 			goto done;
1847 	} else if (rinfo->head->is_dentry && req->r_dentry) {
1848 		/* parent inode is not locked, be careful */
1849 		struct ceph_vino *ptvino = NULL;
1850 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1851 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1852 		if (rinfo->head->is_target) {
1853 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1854 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1855 			ptvino = &tvino;
1856 		}
1857 		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
1858 					    session, req->r_request_started,
1859 					    rinfo->dname, rinfo->dname_len,
1860 					    &dvino, ptvino);
1861 	}
1862 done:
1863 	/* Drop extra ref from ceph_get_reply_dir() if it returned a new inode */
1864 	if (unlikely(!IS_ERR_OR_NULL(parent_dir) && parent_dir != req->r_parent))
1865 		iput(parent_dir);
1866 	doutc(cl, "done err=%d\n", err);
1867 	return err;
1868 }
1869 
1870 /*
1871  * Prepopulate our cache with readdir results, leases, etc.
1872  */
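/*
 * Used when the request was aborted: req->r_dentry cannot be trusted, so
 * only instantiate the inodes from the reply and drop the references again.
 */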
1873 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1874 					   struct ceph_mds_session *session)
1875 {
1876 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1877 	struct ceph_client *cl = session->s_mdsc->fsc->client;
1878 	int i, err = 0;
1879 
1880 	for (i = 0; i < rinfo->dir_nr; i++) {
1881 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1882 		struct ceph_vino vino;
1883 		struct inode *in;
1884 		int rc;
1885 
1886 		vino.ino = le64_to_cpu(rde->inode.in->ino);
1887 		vino.snap = le64_to_cpu(rde->inode.in->snapid);
1888 
1889 		in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL);
1890 		if (IS_ERR(in)) {
1891 			err = PTR_ERR(in);
1892 			doutc(cl, "badness got %d\n", err);
1893 			continue;
1894 		}
1895 		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1896 				     -1, &req->r_caps_reservation);
1897 		if (rc < 0) {
1898 			pr_err_client(cl, "inode badness on %p got %d\n", in,
1899 				      rc);
1900 			err = rc;
1901 			if (in->i_state & I_NEW) {
1902 				ihold(in);
1903 				discard_new_inode(in);
1904 			}
1905 		} else if (in->i_state & I_NEW) {
1906 			unlock_new_inode(in);
1907 		}
1908 
1909 		iput(in);
1910 	}
1911 
1912 	return err;
1913 }
1914 
1915 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1916 {
1917 	if (ctl->folio) {
1918 		folio_release_kmap(ctl->folio, ctl->dentries);
1919 		ctl->folio = NULL;
1920 	}
1921 }
1922 
1923 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1924 			      struct ceph_readdir_cache_control *ctl,
1925 			      struct ceph_mds_request *req)
1926 {
1927 	struct ceph_client *cl = ceph_inode_to_client(dir);
1928 	struct ceph_inode_info *ci = ceph_inode(dir);
1929 	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1930 	unsigned idx = ctl->index % nsize;
1931 	pgoff_t pgoff = ctl->index / nsize;
1932 
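	/*
	 * The readdir cache is an array of dentry pointers stored in the
	 * directory's page cache; ctl->index is the linear slot, split into
	 * a page offset (pgoff) and an index within that page (idx).
	 */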
1933 	if (!ctl->folio || pgoff != ctl->folio->index) {
1934 		ceph_readdir_cache_release(ctl);
1935 		fgf_t fgf = FGP_LOCK;
1936 
1937 		if (idx == 0)
1938 			fgf |= FGP_ACCESSED | FGP_CREAT;
1939 
1940 		ctl->folio = __filemap_get_folio(&dir->i_data, pgoff,
1941 				fgf, mapping_gfp_mask(&dir->i_data));
1942 		if (IS_ERR(ctl->folio)) {
1943 			int err = PTR_ERR(ctl->folio);
1944 
1945 			ctl->folio = NULL;
1946 			ctl->index = -1;
1947 			return idx == 0 ? err : 0;
1948 		}
1949 		/* reading/filling the cache is serialized by
1950 		 * i_rwsem; no need to hold the folio lock */
1951 		folio_unlock(ctl->folio);
1952 		ctl->dentries = kmap_local_folio(ctl->folio, 0);
1953 		if (idx == 0)
1954 			memset(ctl->dentries, 0, PAGE_SIZE);
1955 	}
1956 
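	/*
	 * Only record the dentry if no dentries were dropped and the
	 * directory ordering is unchanged since this readdir started;
	 * otherwise the cache would be inconsistent, so disable it.
	 */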
1957 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1958 	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1959 		doutc(cl, "dn %p idx %d\n", dn, ctl->index);
1960 		ctl->dentries[idx] = dn;
1961 		ctl->index++;
1962 	} else {
1963 		doutc(cl, "disable readdir cache\n");
1964 		ctl->index = -1;
1965 	}
1966 	return 0;
1967 }
1968 
1969 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1970 			     struct ceph_mds_session *session)
1971 {
1972 	struct dentry *parent = req->r_dentry;
1973 	struct inode *inode = d_inode(parent);
1974 	struct ceph_inode_info *ci = ceph_inode(inode);
1975 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1976 	struct ceph_client *cl = session->s_mdsc->fsc->client;
1977 	struct qstr dname;
1978 	struct dentry *dn;
1979 	struct inode *in;
1980 	int err = 0, skipped = 0, ret, i;
1981 	u32 frag = le32_to_cpu(req->r_args.readdir.frag);
1982 	u32 last_hash = 0;
1983 	u32 fpos_offset;
1984 	struct ceph_readdir_cache_control cache_ctl = {};
1985 
1986 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1987 		return readdir_prepopulate_inodes_only(req, session);
1988 
1989 	if (rinfo->hash_order) {
1990 		if (req->r_path2) {
1991 			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1992 						  req->r_path2,
1993 						  strlen(req->r_path2));
1994 			last_hash = ceph_frag_value(last_hash);
1995 		} else if (rinfo->offset_hash) {
1996 			/* mds understands offset_hash */
1997 			WARN_ON_ONCE(req->r_readdir_offset != 2);
1998 			last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
1999 		}
2000 	}
2001 
2002 	if (rinfo->dir_dir &&
2003 	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
2004 		doutc(cl, "got new frag %x -> %x\n", frag,
2005 			    le32_to_cpu(rinfo->dir_dir->frag));
2006 		frag = le32_to_cpu(rinfo->dir_dir->frag);
2007 		if (!rinfo->hash_order)
2008 			req->r_readdir_offset = 2;
2009 	}
2010 
2011 	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
2012 		doutc(cl, "%d items under SNAPDIR dn %p\n",
2013 		      rinfo->dir_nr, parent);
2014 	} else {
2015 		doutc(cl, "%d items under dn %p\n", rinfo->dir_nr, parent);
2016 		if (rinfo->dir_dir)
2017 			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
2018 
2019 		if (ceph_frag_is_leftmost(frag) &&
2020 		    req->r_readdir_offset == 2 &&
2021 		    !(rinfo->hash_order && last_hash)) {
2022 			/* note dir version at start of readdir so we can
2023 			 * tell if any dentries get dropped */
2024 			req->r_dir_release_cnt =
2025 				atomic64_read(&ci->i_release_count);
2026 			req->r_dir_ordered_cnt =
2027 				atomic64_read(&ci->i_ordered_count);
2028 			req->r_readdir_cache_idx = 0;
2029 		}
2030 	}
2031 
2032 	cache_ctl.index = req->r_readdir_cache_idx;
2033 	fpos_offset = req->r_readdir_offset;
2034 
2035 	/* FIXME: release caps/leases if error occurs */
2036 	for (i = 0; i < rinfo->dir_nr; i++) {
2037 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
2038 		struct ceph_vino tvino;
2039 
2040 		dname.name = rde->name;
2041 		dname.len = rde->name_len;
2042 		dname.hash = full_name_hash(parent, dname.name, dname.len);
2043 
2044 		tvino.ino = le64_to_cpu(rde->inode.in->ino);
2045 		tvino.snap = le64_to_cpu(rde->inode.in->snapid);
2046 
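		/*
		 * In hash order an entry's offset encodes (hash, index
		 * within that hash); restart the index at 2 whenever the
		 * hash value changes.
		 */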
2047 		if (rinfo->hash_order) {
2048 			u32 hash = ceph_frag_value(rde->raw_hash);
2049 			if (hash != last_hash)
2050 				fpos_offset = 2;
2051 			last_hash = hash;
2052 			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
2053 		} else {
2054 			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
2055 		}
2056 
2057 retry_lookup:
2058 		dn = d_lookup(parent, &dname);
2059 		doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
2060 		      parent, dname.len, dname.name, dn);
2061 
2062 		if (!dn) {
2063 			dn = d_alloc(parent, &dname);
2064 			doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
2065 			      dname.len, dname.name, dn);
2066 			if (!dn) {
2067 				doutc(cl, "d_alloc badness\n");
2068 				err = -ENOMEM;
2069 				goto out;
2070 			}
2071 			if (rde->is_nokey) {
2072 				spin_lock(&dn->d_lock);
2073 				dn->d_flags |= DCACHE_NOKEY_NAME;
2074 				spin_unlock(&dn->d_lock);
2075 			}
2076 		} else if (d_really_is_positive(dn) &&
2077 			   (ceph_ino(d_inode(dn)) != tvino.ino ||
2078 			    ceph_snap(d_inode(dn)) != tvino.snap)) {
2079 			struct ceph_dentry_info *di = ceph_dentry(dn);
2080 			doutc(cl, " dn %p points to wrong inode %p\n",
2081 			      dn, d_inode(dn));
2082 
2083 			spin_lock(&dn->d_lock);
2084 			if (di->offset > 0 &&
2085 			    di->lease_shared_gen ==
2086 			    atomic_read(&ci->i_shared_gen)) {
2087 				__ceph_dir_clear_ordered(ci);
2088 				di->offset = 0;
2089 			}
2090 			spin_unlock(&dn->d_lock);
2091 
2092 			d_delete(dn);
2093 			dput(dn);
2094 			goto retry_lookup;
2095 		}
2096 
2097 		/* inode */
2098 		if (d_really_is_positive(dn)) {
2099 			in = d_inode(dn);
2100 		} else {
2101 			in = ceph_get_inode(parent->d_sb, tvino, NULL);
2102 			if (IS_ERR(in)) {
2103 				doutc(cl, "new_inode badness\n");
2104 				d_drop(dn);
2105 				dput(dn);
2106 				err = PTR_ERR(in);
2107 				goto out;
2108 			}
2109 		}
2110 
2111 		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
2112 				      -1, &req->r_caps_reservation);
2113 		if (ret < 0) {
2114 			pr_err_client(cl, "badness on %p %llx.%llx\n", in,
2115 				      ceph_vinop(in));
2116 			if (d_really_is_negative(dn)) {
2117 				if (in->i_state & I_NEW) {
2118 					ihold(in);
2119 					discard_new_inode(in);
2120 				}
2121 				iput(in);
2122 			}
2123 			d_drop(dn);
2124 			err = ret;
2125 			goto next_item;
2126 		}
2127 		if (in->i_state & I_NEW)
2128 			unlock_new_inode(in);
2129 
2130 		if (d_really_is_negative(dn)) {
2131 			if (ceph_security_xattr_deadlock(in)) {
2132 				doutc(cl, " skip splicing dn %p to inode %p"
2133 				      " (security xattr deadlock)\n", dn, in);
2134 				iput(in);
2135 				skipped++;
2136 				goto next_item;
2137 			}
2138 
2139 			err = splice_dentry(&dn, in);
2140 			if (err < 0)
2141 				goto next_item;
2142 		}
2143 
2144 		ceph_dentry(dn)->offset = rde->offset;
2145 
2146 		update_dentry_lease(d_inode(parent), dn,
2147 				    rde->lease, req->r_session,
2148 				    req->r_request_started);
2149 
2150 		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
2151 			ret = fill_readdir_cache(d_inode(parent), dn,
2152 						 &cache_ctl, req);
2153 			if (ret < 0)
2154 				err = ret;
2155 		}
2156 next_item:
2157 		dput(dn);
2158 	}
2159 out:
2160 	if (err == 0 && skipped == 0) {
2161 		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
2162 		req->r_readdir_cache_idx = cache_ctl.index;
2163 	}
2164 	ceph_readdir_cache_release(&cache_ctl);
2165 	doutc(cl, "done\n");
2166 	return err;
2167 }
2168 
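/*
 * Update the locally cached i_size. Returns true if the new size should
 * be reported back to the MDS, in which case the caller is expected to
 * trigger a cap update.
 */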
2169 bool ceph_inode_set_size(struct inode *inode, loff_t size)
2170 {
2171 	struct ceph_client *cl = ceph_inode_to_client(inode);
2172 	struct ceph_inode_info *ci = ceph_inode(inode);
2173 	bool ret;
2174 
2175 	spin_lock(&ci->i_ceph_lock);
2176 	doutc(cl, "set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
2177 	i_size_write(inode, size);
2178 	ceph_fscache_update(inode);
2179 	inode->i_blocks = calc_inode_blocks(size);
2180 
2181 	ret = __ceph_should_report_size(ci);
2182 
2183 	spin_unlock(&ci->i_ceph_lock);
2184 
2185 	return ret;
2186 }
2187 
2188 void ceph_queue_inode_work(struct inode *inode, int work_bit)
2189 {
2190 	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2191 	struct ceph_client *cl = fsc->client;
2192 	struct ceph_inode_info *ci = ceph_inode(inode);
2193 	set_bit(work_bit, &ci->i_work_mask);
2194 
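	/* hold an inode reference for the work; drop it if already queued */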
2195 	ihold(inode);
2196 	if (queue_work(fsc->inode_wq, &ci->i_work)) {
2197 		doutc(cl, "%p %llx.%llx mask=%lx\n", inode,
2198 		      ceph_vinop(inode), ci->i_work_mask);
2199 	} else {
2200 		doutc(cl, "%p %llx.%llx already queued, mask=%lx\n",
2201 		      inode, ceph_vinop(inode), ci->i_work_mask);
2202 		iput(inode);
2203 	}
2204 }
2205 
2206 static void ceph_do_invalidate_pages(struct inode *inode)
2207 {
2208 	struct ceph_client *cl = ceph_inode_to_client(inode);
2209 	struct ceph_inode_info *ci = ceph_inode(inode);
2210 	u32 orig_gen;
2211 	int check = 0;
2212 
2213 	ceph_fscache_invalidate(inode, false);
2214 
2215 	mutex_lock(&ci->i_truncate_mutex);
2216 
2217 	if (ceph_inode_is_shutdown(inode)) {
2218 		pr_warn_ratelimited_client(cl,
2219 			"%p %llx.%llx is shut down\n", inode,
2220 			ceph_vinop(inode));
2221 		mapping_set_error(inode->i_mapping, -EIO);
2222 		truncate_pagecache(inode, 0);
2223 		mutex_unlock(&ci->i_truncate_mutex);
2224 		goto out;
2225 	}
2226 
2227 	spin_lock(&ci->i_ceph_lock);
2228 	doutc(cl, "%p %llx.%llx gen %d revoking %d\n", inode,
2229 	      ceph_vinop(inode), ci->i_rdcache_gen, ci->i_rdcache_revoking);
2230 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2231 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
2232 			check = 1;
2233 		spin_unlock(&ci->i_ceph_lock);
2234 		mutex_unlock(&ci->i_truncate_mutex);
2235 		goto out;
2236 	}
2237 	orig_gen = ci->i_rdcache_gen;
2238 	spin_unlock(&ci->i_ceph_lock);
2239 
2240 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
2241 		pr_err_client(cl, "invalidate_inode_pages2 %llx.%llx failed\n",
2242 			      ceph_vinop(inode));
2243 	}
2244 
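	/*
	 * Recheck the generation: the invalidation only counts if the
	 * cache generation is unchanged and this is still the generation
	 * being revoked.
	 */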
2245 	spin_lock(&ci->i_ceph_lock);
2246 	if (orig_gen == ci->i_rdcache_gen &&
2247 	    orig_gen == ci->i_rdcache_revoking) {
2248 		doutc(cl, "%p %llx.%llx gen %d successful\n", inode,
2249 		      ceph_vinop(inode), ci->i_rdcache_gen);
2250 		ci->i_rdcache_revoking--;
2251 		check = 1;
2252 	} else {
2253 		doutc(cl, "%p %llx.%llx gen %d raced, now %d revoking %d\n",
2254 		      inode, ceph_vinop(inode), orig_gen, ci->i_rdcache_gen,
2255 		      ci->i_rdcache_revoking);
2256 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
2257 			check = 1;
2258 	}
2259 	spin_unlock(&ci->i_ceph_lock);
2260 	mutex_unlock(&ci->i_truncate_mutex);
2261 out:
2262 	if (check)
2263 		ceph_check_caps(ci, 0);
2264 }
2265 
2266 /*
2267  * Make sure any pending truncation is applied before doing anything
2268  * that may depend on it.
2269  */
2270 void __ceph_do_pending_vmtruncate(struct inode *inode)
2271 {
2272 	struct ceph_client *cl = ceph_inode_to_client(inode);
2273 	struct ceph_inode_info *ci = ceph_inode(inode);
2274 	u64 to;
2275 	int wrbuffer_refs, finish = 0;
2276 
2277 	mutex_lock(&ci->i_truncate_mutex);
2278 retry:
2279 	spin_lock(&ci->i_ceph_lock);
2280 	if (ci->i_truncate_pending == 0) {
2281 		doutc(cl, "%p %llx.%llx none pending\n", inode,
2282 		      ceph_vinop(inode));
2283 		spin_unlock(&ci->i_ceph_lock);
2284 		mutex_unlock(&ci->i_truncate_mutex);
2285 		return;
2286 	}
2287 
2288 	/*
2289 	 * make sure any dirty snapped pages are flushed before we
2290 	 * possibly truncate them... so write AND block!
2291 	 */
2292 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
2293 		spin_unlock(&ci->i_ceph_lock);
2294 		doutc(cl, "%p %llx.%llx flushing snaps first\n", inode,
2295 		      ceph_vinop(inode));
2296 		filemap_write_and_wait_range(&inode->i_data, 0,
2297 					     inode->i_sb->s_maxbytes);
2298 		goto retry;
2299 	}
2300 
2301 	/* there should be no reader or writer */
2302 	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
2303 
2304 	to = ci->i_truncate_pagecache_size;
2305 	wrbuffer_refs = ci->i_wrbuffer_ref;
2306 	doutc(cl, "%p %llx.%llx (%d) to %lld\n", inode, ceph_vinop(inode),
2307 	      ci->i_truncate_pending, to);
2308 	spin_unlock(&ci->i_ceph_lock);
2309 
2310 	ceph_fscache_resize(inode, to);
2311 	truncate_pagecache(inode, to);
2312 
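	/*
	 * Another truncate may have raced in while the lock was dropped;
	 * if the target size changed, redo the pagecache truncate.
	 */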
2313 	spin_lock(&ci->i_ceph_lock);
2314 	if (to == ci->i_truncate_pagecache_size) {
2315 		ci->i_truncate_pending = 0;
2316 		finish = 1;
2317 	}
2318 	spin_unlock(&ci->i_ceph_lock);
2319 	if (!finish)
2320 		goto retry;
2321 
2322 	mutex_unlock(&ci->i_truncate_mutex);
2323 
2324 	if (wrbuffer_refs == 0)
2325 		ceph_check_caps(ci, 0);
2326 
2327 	wake_up_all(&ci->i_cap_wq);
2328 }
2329 
2330 static void ceph_inode_work(struct work_struct *work)
2331 {
2332 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
2333 						 i_work);
2334 	struct inode *inode = &ci->netfs.inode;
2335 	struct ceph_client *cl = ceph_inode_to_client(inode);
2336 
2337 	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
2338 		doutc(cl, "writeback %p %llx.%llx\n", inode, ceph_vinop(inode));
2339 		filemap_fdatawrite(&inode->i_data);
2340 	}
2341 	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
2342 		ceph_do_invalidate_pages(inode);
2343 
2344 	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
2345 		__ceph_do_pending_vmtruncate(inode);
2346 
2347 	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
2348 		ceph_check_caps(ci, 0);
2349 
2350 	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
2351 		ceph_flush_snaps(ci, NULL);
2352 
2353 	iput(inode);
2354 }
2355 
2356 static const char *ceph_encrypted_get_link(struct dentry *dentry,
2357 					   struct inode *inode,
2358 					   struct delayed_call *done)
2359 {
2360 	struct ceph_inode_info *ci = ceph_inode(inode);
2361 
2362 	if (!dentry)
2363 		return ERR_PTR(-ECHILD);
2364 
2365 	return fscrypt_get_symlink(inode, ci->i_symlink, i_size_read(inode),
2366 				   done);
2367 }
2368 
2369 static int ceph_encrypted_symlink_getattr(struct mnt_idmap *idmap,
2370 					  const struct path *path,
2371 					  struct kstat *stat, u32 request_mask,
2372 					  unsigned int query_flags)
2373 {
2374 	int ret;
2375 
2376 	ret = ceph_getattr(idmap, path, stat, request_mask, query_flags);
2377 	if (ret)
2378 		return ret;
2379 	return fscrypt_symlink_getattr(path, stat);
2380 }
2381 
2382 /*
2383  * symlinks
2384  */
2385 static const struct inode_operations ceph_symlink_iops = {
2386 	.get_link = simple_get_link,
2387 	.setattr = ceph_setattr,
2388 	.getattr = ceph_getattr,
2389 	.listxattr = ceph_listxattr,
2390 };
2391 
2392 static const struct inode_operations ceph_encrypted_symlink_iops = {
2393 	.get_link = ceph_encrypted_get_link,
2394 	.setattr = ceph_setattr,
2395 	.getattr = ceph_encrypted_symlink_getattr,
2396 	.listxattr = ceph_listxattr,
2397 };
2398 
2399 /*
2400  * Transfer the encrypted last block to the MDS, which will
2401  * update it on our behalf when truncating to a smaller size.
2402  *
2403  * We don't support a PAGE_SIZE that is smaller than the
2404  * CEPH_FSCRYPT_BLOCK_SIZE.
2405  */
2406 static int fill_fscrypt_truncate(struct inode *inode,
2407 				 struct ceph_mds_request *req,
2408 				 struct iattr *attr)
2409 {
2410 	struct ceph_client *cl = ceph_inode_to_client(inode);
2411 	struct ceph_inode_info *ci = ceph_inode(inode);
2412 	int boff = attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE;
2413 	loff_t pos, orig_pos = round_down(attr->ia_size,
2414 					  CEPH_FSCRYPT_BLOCK_SIZE);
2415 	u64 block = orig_pos >> CEPH_FSCRYPT_BLOCK_SHIFT;
2416 	struct ceph_pagelist *pagelist = NULL;
2417 	struct kvec iov = {0};
2418 	struct iov_iter iter;
2419 	struct page *page = NULL;
2420 	struct ceph_fscrypt_truncate_size_header header;
2421 	int retry_op = 0;
2422 	int len = CEPH_FSCRYPT_BLOCK_SIZE;
2423 	loff_t i_size = i_size_read(inode);
2424 	int got, ret, issued;
2425 	u64 objver;
2426 
2427 	ret = __ceph_get_caps(inode, NULL, CEPH_CAP_FILE_RD, 0, -1, &got);
2428 	if (ret < 0)
2429 		return ret;
2430 
2431 	issued = __ceph_caps_issued(ci, NULL);
2432 
2433 	doutc(cl, "size %lld -> %lld got cap refs on %s, issued %s\n",
2434 	      i_size, attr->ia_size, ceph_cap_string(got),
2435 	      ceph_cap_string(issued));
2436 
2437 	/* Try to writeback the dirty pagecaches */
2438 	if (issued & (CEPH_CAP_FILE_BUFFER)) {
2439 		loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SIZE - 1;
2440 
2441 		ret = filemap_write_and_wait_range(inode->i_mapping,
2442 						   orig_pos, lend);
2443 		if (ret < 0)
2444 			goto out;
2445 	}
2446 
2447 	page = __page_cache_alloc(GFP_KERNEL);
2448 	if (page == NULL) {
2449 		ret = -ENOMEM;
2450 		goto out;
2451 	}
2452 
2453 	pagelist = ceph_pagelist_alloc(GFP_KERNEL);
2454 	if (!pagelist) {
2455 		ret = -ENOMEM;
2456 		goto out;
2457 	}
2458 
2459 	iov.iov_base = kmap_local_page(page);
2460 	iov.iov_len = len;
2461 	iov_iter_kvec(&iter, READ, &iov, 1, len);
2462 
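	/*
	 * Synchronously read the last fscrypt block so that its tail can
	 * be zeroed and the block re-encrypted for the MDS below.
	 */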
2463 	pos = orig_pos;
2464 	ret = __ceph_sync_read(inode, &pos, &iter, &retry_op, &objver);
2465 	if (ret < 0)
2466 		goto out;
2467 
2468 	/* Insert the header first */
2469 	header.ver = 1;
2470 	header.compat = 1;
2471 	header.change_attr = cpu_to_le64(inode_peek_iversion_raw(inode));
2472 
2473 	/*
2474 	 * Always set the block_size to CEPH_FSCRYPT_BLOCK_SIZE,
2475 	 * because the MDS may need it to do the truncate.
2476 	 */
2477 	header.block_size = cpu_to_le32(CEPH_FSCRYPT_BLOCK_SIZE);
2478 
2479 	/*
2480 	 * If we hit a hole here, just skip filling the fscrypt
2481 	 * payload for the request: once fscrypt is enabled, the
2482 	 * file is split into blocks of CEPH_FSCRYPT_BLOCK_SIZE,
2483 	 * so any hole must be a multiple of the block size and
2484 	 * there is no last-block content that needs to be sent
2485 	 * to the MDS.
2486 	 *
2487 	 * If the RADOS object doesn't exist, objver is left at 0.
2488 	 */
2489 	if (!objver) {
2490 		doutc(cl, "hit hole, ppos %lld < size %lld\n", pos, i_size);
2491 
2492 		header.data_len = cpu_to_le32(8 + 8 + 4);
2493 		header.file_offset = 0;
2494 		ret = 0;
2495 	} else {
2496 		header.data_len = cpu_to_le32(8 + 8 + 4 + CEPH_FSCRYPT_BLOCK_SIZE);
2497 		header.file_offset = cpu_to_le64(orig_pos);
2498 
2499 		doutc(cl, "encrypt block boff/bsize %d/%lu\n", boff,
2500 		      CEPH_FSCRYPT_BLOCK_SIZE);
2501 
2502 		/* truncate and zero out the extra contents for the last block */
2503 		memset(iov.iov_base + boff, 0, PAGE_SIZE - boff);
2504 
2505 		/* encrypt the last block */
2506 		ret = ceph_fscrypt_encrypt_block_inplace(inode, page,
2507 						    CEPH_FSCRYPT_BLOCK_SIZE,
2508 						    0, block);
2509 		if (ret)
2510 			goto out;
2511 	}
2512 
2513 	/* Insert the header */
2514 	ret = ceph_pagelist_append(pagelist, &header, sizeof(header));
2515 	if (ret)
2516 		goto out;
2517 
2518 	if (header.block_size) {
2519 		/* Append the last block contents to pagelist */
2520 		ret = ceph_pagelist_append(pagelist, iov.iov_base,
2521 					   CEPH_FSCRYPT_BLOCK_SIZE);
2522 		if (ret)
2523 			goto out;
2524 	}
2525 	req->r_pagelist = pagelist;
2526 out:
2527 	doutc(cl, "%p %llx.%llx size dropping cap refs on %s\n", inode,
2528 	      ceph_vinop(inode), ceph_cap_string(got));
2529 	ceph_put_cap_refs(ci, got);
2530 	if (iov.iov_base)
2531 		kunmap_local(iov.iov_base);
2532 	if (page)
2533 		__free_pages(page, 0);
2534 	if (ret && pagelist)
2535 		ceph_pagelist_release(pagelist);
2536 	return ret;
2537 }
2538 
2539 int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
2540 		   struct iattr *attr, struct ceph_iattr *cia)
2541 {
2542 	struct ceph_inode_info *ci = ceph_inode(inode);
2543 	unsigned int ia_valid = attr->ia_valid;
2544 	struct ceph_mds_request *req;
2545 	struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
2546 	struct ceph_client *cl = ceph_inode_to_client(inode);
2547 	struct ceph_cap_flush *prealloc_cf;
2548 	loff_t isize = i_size_read(inode);
2549 	int issued;
2550 	int release = 0, dirtied = 0;
2551 	int mask = 0;
2552 	int err = 0;
2553 	int inode_dirty_flags = 0;
2554 	bool lock_snap_rwsem = false;
2555 	bool fill_fscrypt;
2556 	int truncate_retry = 20; /* The RMW will take around 50ms */
2557 	struct dentry *dentry;
2558 	char *path;
2559 	bool do_sync = false;
2560 
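	/*
	 * Check MDS auth-cap write access up front. If no alias/path is
	 * available, or the check fails with anything other than -EACCES,
	 * set do_sync so the changes are not applied locally under
	 * exclusive caps but are pushed to the MDS instead.
	 */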
2561 	dentry = d_find_alias(inode);
2562 	if (!dentry) {
2563 		do_sync = true;
2564 	} else {
2565 		struct ceph_path_info path_info;
2566 		path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
2567 		if (IS_ERR(path)) {
2568 			do_sync = true;
2569 			err = 0;
2570 		} else {
2571 			err = ceph_mds_check_access(mdsc, path, MAY_WRITE);
2572 		}
2573 		ceph_mdsc_free_path_info(&path_info);
2574 		dput(dentry);
2575 
2576 		/* For non-EACCES errors, let the MDS do the auth check itself */
2577 		if (err == -EACCES) {
2578 			return err;
2579 		} else if (err < 0) {
2580 			do_sync = true;
2581 			err = 0;
2582 		}
2583 	}
2584 
2585 retry:
2586 	prealloc_cf = ceph_alloc_cap_flush();
2587 	if (!prealloc_cf)
2588 		return -ENOMEM;
2589 
2590 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2591 				       USE_AUTH_MDS);
2592 	if (IS_ERR(req)) {
2593 		ceph_free_cap_flush(prealloc_cf);
2594 		return PTR_ERR(req);
2595 	}
2596 
2597 	fill_fscrypt = false;
2598 	spin_lock(&ci->i_ceph_lock);
2599 	issued = __ceph_caps_issued(ci, NULL);
2600 
2601 	if (!ci->i_head_snapc &&
2602 	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2603 		lock_snap_rwsem = true;
2604 		if (!down_read_trylock(&mdsc->snap_rwsem)) {
2605 			spin_unlock(&ci->i_ceph_lock);
2606 			down_read(&mdsc->snap_rwsem);
2607 			spin_lock(&ci->i_ceph_lock);
2608 			issued = __ceph_caps_issued(ci, NULL);
2609 		}
2610 	}
2611 
2612 	doutc(cl, "%p %llx.%llx issued %s\n", inode, ceph_vinop(inode),
2613 	      ceph_cap_string(issued));
2614 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
2615 	if (cia && cia->fscrypt_auth) {
2616 		u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth);
2617 
2618 		if (len > sizeof(*cia->fscrypt_auth)) {
2619 			err = -EINVAL;
2620 			spin_unlock(&ci->i_ceph_lock);
2621 			goto out;
2622 		}
2623 
2624 		doutc(cl, "%p %llx.%llx fscrypt_auth len %u to %u)\n", inode,
2625 		      ceph_vinop(inode), ci->fscrypt_auth_len, len);
2626 
2627 		/* It should never be re-set once set */
2628 		WARN_ON_ONCE(ci->fscrypt_auth);
2629 
2630 		if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
2631 			dirtied |= CEPH_CAP_AUTH_EXCL;
2632 			kfree(ci->fscrypt_auth);
2633 			ci->fscrypt_auth = (u8 *)cia->fscrypt_auth;
2634 			ci->fscrypt_auth_len = len;
2635 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2636 			   ci->fscrypt_auth_len != len ||
2637 			   memcmp(ci->fscrypt_auth, cia->fscrypt_auth, len)) {
2638 			req->r_fscrypt_auth = cia->fscrypt_auth;
2639 			mask |= CEPH_SETATTR_FSCRYPT_AUTH;
2640 			release |= CEPH_CAP_AUTH_SHARED;
2641 		}
2642 		cia->fscrypt_auth = NULL;
2643 	}
2644 #else
2645 	if (cia && cia->fscrypt_auth) {
2646 		err = -EINVAL;
2647 		spin_unlock(&ci->i_ceph_lock);
2648 		goto out;
2649 	}
2650 #endif /* CONFIG_FS_ENCRYPTION */
2651 
2652 	if (ia_valid & ATTR_UID) {
2653 		kuid_t fsuid = from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid);
2654 
2655 		doutc(cl, "%p %llx.%llx uid %d -> %d\n", inode,
2656 		      ceph_vinop(inode),
2657 		      from_kuid(&init_user_ns, inode->i_uid),
2658 		      from_kuid(&init_user_ns, attr->ia_uid));
2659 		if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
2660 			inode->i_uid = fsuid;
2661 			dirtied |= CEPH_CAP_AUTH_EXCL;
2662 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2663 			   !uid_eq(fsuid, inode->i_uid)) {
2664 			req->r_args.setattr.uid = cpu_to_le32(
2665 				from_kuid(&init_user_ns, fsuid));
2666 			mask |= CEPH_SETATTR_UID;
2667 			release |= CEPH_CAP_AUTH_SHARED;
2668 		}
2669 	}
2670 	if (ia_valid & ATTR_GID) {
2671 		kgid_t fsgid = from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid);
2672 
2673 		doutc(cl, "%p %llx.%llx gid %d -> %d\n", inode,
2674 		      ceph_vinop(inode),
2675 		      from_kgid(&init_user_ns, inode->i_gid),
2676 		      from_kgid(&init_user_ns, attr->ia_gid));
2677 		if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
2678 			inode->i_gid = fsgid;
2679 			dirtied |= CEPH_CAP_AUTH_EXCL;
2680 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2681 			   !gid_eq(fsgid, inode->i_gid)) {
2682 			req->r_args.setattr.gid = cpu_to_le32(
2683 				from_kgid(&init_user_ns, fsgid));
2684 			mask |= CEPH_SETATTR_GID;
2685 			release |= CEPH_CAP_AUTH_SHARED;
2686 		}
2687 	}
2688 	if (ia_valid & ATTR_MODE) {
2689 		doutc(cl, "%p %llx.%llx mode 0%o -> 0%o\n", inode,
2690 		      ceph_vinop(inode), inode->i_mode, attr->ia_mode);
2691 		if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
2692 			inode->i_mode = attr->ia_mode;
2693 			dirtied |= CEPH_CAP_AUTH_EXCL;
2694 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2695 			   attr->ia_mode != inode->i_mode) {
2696 			inode->i_mode = attr->ia_mode;
2697 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2698 			mask |= CEPH_SETATTR_MODE;
2699 			release |= CEPH_CAP_AUTH_SHARED;
2700 		}
2701 	}
2702 
2703 	if (ia_valid & ATTR_ATIME) {
2704 		struct timespec64 atime = inode_get_atime(inode);
2705 
2706 		doutc(cl, "%p %llx.%llx atime %lld.%09ld -> %lld.%09ld\n",
2707 		      inode, ceph_vinop(inode),
2708 		      atime.tv_sec, atime.tv_nsec,
2709 		      attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2710 		if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
2711 			ci->i_time_warp_seq++;
2712 			inode_set_atime_to_ts(inode, attr->ia_atime);
2713 			dirtied |= CEPH_CAP_FILE_EXCL;
2714 		} else if (!do_sync && (issued & CEPH_CAP_FILE_WR) &&
2715 			   timespec64_compare(&atime,
2716 					      &attr->ia_atime) < 0) {
2717 			inode_set_atime_to_ts(inode, attr->ia_atime);
2718 			dirtied |= CEPH_CAP_FILE_WR;
2719 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2720 			   !timespec64_equal(&atime, &attr->ia_atime)) {
2721 			ceph_encode_timespec64(&req->r_args.setattr.atime,
2722 					       &attr->ia_atime);
2723 			mask |= CEPH_SETATTR_ATIME;
2724 			release |= CEPH_CAP_FILE_SHARED |
2725 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2726 		}
2727 	}
2728 	if (ia_valid & ATTR_SIZE) {
2729 		doutc(cl, "%p %llx.%llx size %lld -> %lld\n", inode,
2730 		      ceph_vinop(inode), isize, attr->ia_size);
2731 		/*
2732 		 * The RMW is needed only when the new size is smaller
2733 		 * and not aligned to CEPH_FSCRYPT_BLOCK_SIZE.
2734 		 */
2735 		if (IS_ENCRYPTED(inode) && attr->ia_size < isize &&
2736 		    (attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE)) {
2737 			mask |= CEPH_SETATTR_SIZE;
2738 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2739 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2740 			set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
2741 			mask |= CEPH_SETATTR_FSCRYPT_FILE;
2742 			req->r_args.setattr.size =
2743 				cpu_to_le64(round_up(attr->ia_size,
2744 						     CEPH_FSCRYPT_BLOCK_SIZE));
2745 			req->r_args.setattr.old_size =
2746 				cpu_to_le64(round_up(isize,
2747 						     CEPH_FSCRYPT_BLOCK_SIZE));
2748 			req->r_fscrypt_file = attr->ia_size;
2749 			fill_fscrypt = true;
2750 		} else if (!do_sync && (issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
2751 			if (attr->ia_size > isize) {
2752 				i_size_write(inode, attr->ia_size);
2753 				inode->i_blocks = calc_inode_blocks(attr->ia_size);
2754 				ci->i_reported_size = attr->ia_size;
2755 				dirtied |= CEPH_CAP_FILE_EXCL;
2756 				ia_valid |= ATTR_MTIME;
2757 			}
2758 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2759 			   attr->ia_size != isize) {
2760 			mask |= CEPH_SETATTR_SIZE;
2761 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2762 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2763 			if (IS_ENCRYPTED(inode) && attr->ia_size) {
2764 				set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
2765 				mask |= CEPH_SETATTR_FSCRYPT_FILE;
2766 				req->r_args.setattr.size =
2767 					cpu_to_le64(round_up(attr->ia_size,
2768 							     CEPH_FSCRYPT_BLOCK_SIZE));
2769 				req->r_args.setattr.old_size =
2770 					cpu_to_le64(round_up(isize,
2771 							     CEPH_FSCRYPT_BLOCK_SIZE));
2772 				req->r_fscrypt_file = attr->ia_size;
2773 			} else {
2774 				req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2775 				req->r_args.setattr.old_size = cpu_to_le64(isize);
2776 				req->r_fscrypt_file = 0;
2777 			}
2778 		}
2779 	}
2780 	if (ia_valid & ATTR_MTIME) {
2781 		struct timespec64 mtime = inode_get_mtime(inode);
2782 
2783 		doutc(cl, "%p %llx.%llx mtime %lld.%09ld -> %lld.%09ld\n",
2784 		      inode, ceph_vinop(inode),
2785 		      mtime.tv_sec, mtime.tv_nsec,
2786 		      attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2787 		if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
2788 			ci->i_time_warp_seq++;
2789 			inode_set_mtime_to_ts(inode, attr->ia_mtime);
2790 			dirtied |= CEPH_CAP_FILE_EXCL;
2791 		} else if (!do_sync && (issued & CEPH_CAP_FILE_WR) &&
2792 			   timespec64_compare(&mtime, &attr->ia_mtime) < 0) {
2793 			inode_set_mtime_to_ts(inode, attr->ia_mtime);
2794 			dirtied |= CEPH_CAP_FILE_WR;
2795 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2796 			   !timespec64_equal(&mtime, &attr->ia_mtime)) {
2797 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
2798 					       &attr->ia_mtime);
2799 			mask |= CEPH_SETATTR_MTIME;
2800 			release |= CEPH_CAP_FILE_SHARED |
2801 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2802 		}
2803 	}
2804 
2805 	/* these do nothing */
2806 	if (ia_valid & ATTR_CTIME) {
2807 		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2808 					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2809 		doutc(cl, "%p %llx.%llx ctime %lld.%09ld -> %lld.%09ld (%s)\n",
2810 		      inode, ceph_vinop(inode),
2811 		      inode_get_ctime_sec(inode),
2812 		      inode_get_ctime_nsec(inode),
2813 		      attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2814 		      only ? "ctime only" : "ignored");
2815 		if (only) {
2816 			/*
2817 			 * if the kernel wants to dirty ctime but nothing else,
2818 			 * we need to choose a cap to dirty under, or do
2819 			 * an almost-no-op setattr
2820 			 */
2821 			if (issued & CEPH_CAP_AUTH_EXCL)
2822 				dirtied |= CEPH_CAP_AUTH_EXCL;
2823 			else if (issued & CEPH_CAP_FILE_EXCL)
2824 				dirtied |= CEPH_CAP_FILE_EXCL;
2825 			else if (issued & CEPH_CAP_XATTR_EXCL)
2826 				dirtied |= CEPH_CAP_XATTR_EXCL;
2827 			else
2828 				mask |= CEPH_SETATTR_CTIME;
2829 		}
2830 	}
2831 	if (ia_valid & ATTR_FILE)
2832 		doutc(cl, "%p %llx.%llx ATTR_FILE ... hrm!\n", inode,
2833 		      ceph_vinop(inode));
2834 
2835 	if (dirtied) {
2836 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2837 							   &prealloc_cf);
2838 		inode_set_ctime_to_ts(inode, attr->ia_ctime);
2839 		inode_inc_iversion_raw(inode);
2840 	}
2841 
2842 	release &= issued;
2843 	spin_unlock(&ci->i_ceph_lock);
2844 	if (lock_snap_rwsem) {
2845 		up_read(&mdsc->snap_rwsem);
2846 		lock_snap_rwsem = false;
2847 	}
2848 
2849 	if (inode_dirty_flags)
2850 		__mark_inode_dirty(inode, inode_dirty_flags);
2851 
2852 	if (mask) {
2853 		req->r_inode = inode;
2854 		ihold(inode);
2855 		req->r_inode_drop = release;
2856 		req->r_args.setattr.mask = cpu_to_le32(mask);
2857 		req->r_num_caps = 1;
2858 		req->r_stamp = attr->ia_ctime;
2859 		if (fill_fscrypt) {
2860 			err = fill_fscrypt_truncate(inode, req, attr);
2861 			if (err)
2862 				goto out;
2863 		}
2864 
2865 		/*
2866 		 * The truncate request will return -EAGAIN when the
2867 		 * last block has been updated just before the MDS
2868 		 * successfully gets the xlock for the FILE lock. To
2869 		 * avoid corrupting the file contents we need to retry
2870 		 * it.
2871 		 */
2872 		err = ceph_mdsc_do_request(mdsc, NULL, req);
2873 		if (err == -EAGAIN && truncate_retry--) {
2874 			doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote), retry it!\n",
2875 			      inode, ceph_vinop(inode), err,
2876 			      ceph_cap_string(dirtied), mask);
2877 			ceph_mdsc_put_request(req);
2878 			ceph_free_cap_flush(prealloc_cf);
2879 			goto retry;
2880 		}
2881 	}
2882 out:
2883 	doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote)\n", inode,
2884 	      ceph_vinop(inode), err, ceph_cap_string(dirtied), mask);
2885 
2886 	ceph_mdsc_put_request(req);
2887 	ceph_free_cap_flush(prealloc_cf);
2888 
2889 	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2890 		__ceph_do_pending_vmtruncate(inode);
2891 
2892 	return err;
2893 }
2894 
2895 /*
2896  * setattr
2897  */
2898 int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
2899 		 struct iattr *attr)
2900 {
2901 	struct inode *inode = d_inode(dentry);
2902 	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2903 	int err;
2904 
2905 	if (ceph_snap(inode) != CEPH_NOSNAP)
2906 		return -EROFS;
2907 
2908 	if (ceph_inode_is_shutdown(inode))
2909 		return -ESTALE;
2910 
2911 	err = fscrypt_prepare_setattr(dentry, attr);
2912 	if (err)
2913 		return err;
2914 
2915 	err = setattr_prepare(idmap, dentry, attr);
2916 	if (err != 0)
2917 		return err;
2918 
2919 	if ((attr->ia_valid & ATTR_SIZE) &&
2920 	    attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
2921 		return -EFBIG;
2922 
2923 	if ((attr->ia_valid & ATTR_SIZE) &&
2924 	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2925 		return -EDQUOT;
2926 
2927 	err = __ceph_setattr(idmap, inode, attr, NULL);
2928 
2929 	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2930 		err = posix_acl_chmod(idmap, dentry, attr->ia_mode);
2931 
2932 	return err;
2933 }
2934 
2935 int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
2936 {
2937 	int issued = ceph_caps_issued(ceph_inode(inode));
2938 
2939 	/*
2940 	 * If any 'x' cap is issued we can just choose the auth MDS
2941 	 * instead of a random replica MDS: only when the Locker is
2942 	 * in the LOCK_EXEC state can the loner client get 'x' caps.
2943 	 * If we send a getattr request to a replica MDS, it must
2944 	 * auth-pin and try to rdlock from the auth MDS, forcing the
2945 	 * auth MDS through a Locker state transition to LOCK_SYNC
2946 	 * (and back again afterwards).
2947 	 *
2948 	 * These Locker state transitions are expensive and usually
2949 	 * require revoking caps from clients.
2950 	 *
2951 	 * For the 'Xs' caps needed by getxattr we also choose the
2952 	 * auth MDS, because the MDS-side code is buggy: setxattr
2953 	 * doesn't notify the replica MDSes when values change, so
2954 	 * a replica MDS may return stale values. This will be fixed
2955 	 * in the MDS code, but choosing the auth MDS still makes
2956 	 * sense for old ceph.
2957 	 */
2958 	if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
2959 	    || (mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR)))
2960 		return USE_AUTH_MDS;
2961 	else
2962 		return USE_ANY_MDS;
2963 }
2964 
2965 /*
2966  * Verify that we have a lease on the given mask.  If not,
2967  * do a getattr against an mds.
2968  */
2969 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2970 		      int mask, bool force)
2971 {
2972 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
2973 	struct ceph_client *cl = fsc->client;
2974 	struct ceph_mds_client *mdsc = fsc->mdsc;
2975 	struct ceph_mds_request *req;
2976 	int mode;
2977 	int err;
2978 
2979 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
2980 		doutc(cl, "inode %p %llx.%llx SNAPDIR\n", inode,
2981 		      ceph_vinop(inode));
2982 		return 0;
2983 	}
2984 
2985 	doutc(cl, "inode %p %llx.%llx mask %s mode 0%o\n", inode,
2986 	      ceph_vinop(inode), ceph_cap_string(mask), inode->i_mode);
2987 	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2988 			return 0;
2989 
2990 	mode = ceph_try_to_choose_auth_mds(inode, mask);
2991 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2992 	if (IS_ERR(req))
2993 		return PTR_ERR(req);
2994 	req->r_inode = inode;
2995 	ihold(inode);
2996 	req->r_num_caps = 1;
2997 	req->r_args.getattr.mask = cpu_to_le32(mask);
2998 	req->r_locked_page = locked_page;
2999 	err = ceph_mdsc_do_request(mdsc, NULL, req);
3000 	if (locked_page && err == 0) {
3001 		u64 inline_version = req->r_reply_info.targeti.inline_version;
3002 		if (inline_version == 0) {
3003 			/* the reply is supposed to contain inline data */
3004 			err = -EINVAL;
3005 		} else if (inline_version == CEPH_INLINE_NONE ||
3006 			   inline_version == 1) {
3007 			err = -ENODATA;
3008 		} else {
3009 			err = req->r_reply_info.targeti.inline_len;
3010 		}
3011 	}
3012 	ceph_mdsc_put_request(req);
3013 	doutc(cl, "result=%d\n", err);
3014 	return err;
3015 }
3016 
3017 int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
3018 		      size_t size)
3019 {
3020 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
3021 	struct ceph_client *cl = fsc->client;
3022 	struct ceph_mds_client *mdsc = fsc->mdsc;
3023 	struct ceph_mds_request *req;
3024 	int mode = USE_AUTH_MDS;
3025 	int err;
3026 	char *xattr_value;
3027 	size_t xattr_value_len;
3028 
3029 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode);
3030 	if (IS_ERR(req)) {
3031 		err = -ENOMEM;
3032 		goto out;
3033 	}
3034 
3035 	req->r_feature_needed = CEPHFS_FEATURE_OP_GETVXATTR;
3036 	req->r_path2 = kstrdup(name, GFP_NOFS);
3037 	if (!req->r_path2) {
3038 		err = -ENOMEM;
3039 		goto put;
3040 	}
3041 
3042 	ihold(inode);
3043 	req->r_inode = inode;
3044 	err = ceph_mdsc_do_request(mdsc, NULL, req);
3045 	if (err < 0)
3046 		goto put;
3047 
3048 	xattr_value = req->r_reply_info.xattr_info.xattr_value;
3049 	xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
3050 
3051 	doutc(cl, "xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
3052 
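	/* a zero size is a probe for the required buffer length */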
3053 	err = (int)xattr_value_len;
3054 	if (size == 0)
3055 		goto put;
3056 
3057 	if (xattr_value_len > size) {
3058 		err = -ERANGE;
3059 		goto put;
3060 	}
3061 
3062 	memcpy(value, xattr_value, xattr_value_len);
3063 put:
3064 	ceph_mdsc_put_request(req);
3065 out:
3066 	doutc(cl, "result=%d\n", err);
3067 	return err;
3068 }
3069 
3070 
3071 /*
3072  * Check inode permissions.  We verify we have a valid value for
3073  * the AUTH cap, then call the generic handler.
3074  */
3075 int ceph_permission(struct mnt_idmap *idmap, struct inode *inode,
3076 		    int mask)
3077 {
3078 	int err;
3079 
3080 	if (mask & MAY_NOT_BLOCK)
3081 		return -ECHILD;
3082 
3083 	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
3084 
3085 	if (!err)
3086 		err = generic_permission(idmap, inode, mask);
3087 	return err;
3088 }
3089 
3090 /* Craft a mask of needed caps given a set of requested statx attrs. */
3091 static int statx_to_caps(u32 want, umode_t mode)
3092 {
3093 	int mask = 0;
3094 
3095 	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME|STATX_CHANGE_COOKIE))
3096 		mask |= CEPH_CAP_AUTH_SHARED;
3097 
3098 	if (want & (STATX_NLINK|STATX_CTIME|STATX_CHANGE_COOKIE)) {
3099 		/*
3100 		 * The link count for directories depends on inode->i_subdirs,
3101 		 * and that is only updated when Fs caps are held.
3102 		 */
3103 		if (S_ISDIR(mode))
3104 			mask |= CEPH_CAP_FILE_SHARED;
3105 		else
3106 			mask |= CEPH_CAP_LINK_SHARED;
3107 	}
3108 
3109 	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|STATX_BLOCKS|STATX_CHANGE_COOKIE))
3110 		mask |= CEPH_CAP_FILE_SHARED;
3111 
3112 	if (want & (STATX_CTIME|STATX_CHANGE_COOKIE))
3113 		mask |= CEPH_CAP_XATTR_SHARED;
3114 
3115 	return mask;
3116 }
3117 
3118 /*
3119  * Get all the attributes. If we have sufficient caps for the requested attrs,
3120  * then we can avoid talking to the MDS at all.
3121  */
3122 int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
3123 		 struct kstat *stat, u32 request_mask, unsigned int flags)
3124 {
3125 	struct inode *inode = d_inode(path->dentry);
3126 	struct super_block *sb = inode->i_sb;
3127 	struct ceph_inode_info *ci = ceph_inode(inode);
3128 	u32 valid_mask = STATX_BASIC_STATS;
3129 	int err = 0;
3130 
3131 	if (ceph_inode_is_shutdown(inode))
3132 		return -ESTALE;
3133 
3134 	/* Skip the getattr altogether if we're asked not to sync */
3135 	if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
3136 		err = ceph_do_getattr(inode,
3137 				statx_to_caps(request_mask, inode->i_mode),
3138 				flags & AT_STATX_FORCE_SYNC);
3139 		if (err)
3140 			return err;
3141 	}
3142 
3143 	generic_fillattr(idmap, request_mask, inode, stat);
3144 	stat->ino = ceph_present_inode(inode);
3145 
3146 	/*
3147 	 * btime on newly-allocated inodes is 0, so if this is still set to
3148 	 * that, then assume that it's not valid.
3149 	 */
3150 	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
3151 		stat->btime = ci->i_btime;
3152 		valid_mask |= STATX_BTIME;
3153 	}
3154 
3155 	if (request_mask & STATX_CHANGE_COOKIE) {
3156 		stat->change_cookie = inode_peek_iversion_raw(inode);
3157 		valid_mask |= STATX_CHANGE_COOKIE;
3158 	}
3159 
3160 	if (ceph_snap(inode) == CEPH_NOSNAP)
3161 		stat->dev = sb->s_dev;
3162 	else
3163 		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
3164 
3165 	if (S_ISDIR(inode->i_mode)) {
3166 		if (ceph_test_mount_opt(ceph_sb_to_fs_client(sb), RBYTES)) {
3167 			stat->size = ci->i_rbytes;
3168 		} else if (ceph_snap(inode) == CEPH_SNAPDIR) {
3169 			struct ceph_inode_info *pci;
3170 			struct ceph_snap_realm *realm;
3171 			struct inode *parent;
3172 
3173 			parent = ceph_lookup_inode(sb, ceph_ino(inode));
3174 			if (IS_ERR(parent))
3175 				return PTR_ERR(parent);
3176 
3177 			pci = ceph_inode(parent);
3178 			spin_lock(&pci->i_ceph_lock);
3179 			realm = pci->i_snap_realm;
3180 			if (realm)
3181 				stat->size = realm->num_snaps;
3182 			else
3183 				stat->size = 0;
3184 			spin_unlock(&pci->i_ceph_lock);
3185 			iput(parent);
3186 		} else {
3187 			stat->size = ci->i_files + ci->i_subdirs;
3188 		}
3189 		stat->blocks = 0;
3190 		stat->blksize = 65536;
3191 		/*
3192 		 * Some applications rely on the st_nlink value of
3193 		 * directories being either 0 (if unlinked)
3194 		 * or 2 + number of subdirectories.
3195 		 */
3196 		if (stat->nlink == 1)
3197 			/* '.' + '..' + subdirs */
3198 			stat->nlink = 1 + 1 + ci->i_subdirs;
3199 	}
3200 
3201 	stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
3202 	if (IS_ENCRYPTED(inode))
3203 		stat->attributes |= STATX_ATTR_ENCRYPTED;
3204 	stat->attributes_mask |= (STATX_ATTR_CHANGE_MONOTONIC |
3205 				  STATX_ATTR_ENCRYPTED);
3206 
3207 	stat->result_mask = request_mask & valid_mask;
3208 	return err;
3209 }
3210 
3211 void ceph_inode_shutdown(struct inode *inode)
3212 {
3213 	struct ceph_inode_info *ci = ceph_inode(inode);
3214 	struct rb_node *p;
3215 	int iputs = 0;
3216 	bool invalidate = false;
3217 
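	/*
	 * Mark the inode shut down and purge every cap; purging may leave
	 * inode references to drop and pagecache to invalidate.
	 */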
3218 	spin_lock(&ci->i_ceph_lock);
3219 	ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
3220 	p = rb_first(&ci->i_caps);
3221 	while (p) {
3222 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
3223 
3224 		p = rb_next(p);
3225 		iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
3226 	}
3227 	spin_unlock(&ci->i_ceph_lock);
3228 
3229 	if (invalidate)
3230 		ceph_queue_invalidate(inode);
3231 	while (iputs--)
3232 		iput(inode);
3233 }
3234