xref: /linux/fs/fuse/dir.c (revision f96a974170b749e3a56844e25b31d46a7233b6f6)
1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
4 
5   This program can be distributed under the terms of the GNU GPL.
6   See the file COPYING.
7 */
8 
9 #include "fuse_i.h"
10 
11 #include <linux/pagemap.h>
12 #include <linux/file.h>
13 #include <linux/fs_context.h>
14 #include <linux/moduleparam.h>
15 #include <linux/sched.h>
16 #include <linux/namei.h>
17 #include <linux/slab.h>
18 #include <linux/xattr.h>
19 #include <linux/iversion.h>
20 #include <linux/posix_acl.h>
21 #include <linux/security.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 
25 static bool __read_mostly allow_sys_admin_access;
26 module_param(allow_sys_admin_access, bool, 0644);
27 MODULE_PARM_DESC(allow_sys_admin_access,
28 		 "Allow users with CAP_SYS_ADMIN in initial userns to bypass allow_other access check");
29 
30 static void fuse_advise_use_readdirplus(struct inode *dir)
31 {
32 	struct fuse_inode *fi = get_fuse_inode(dir);
33 
34 	set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
35 }
36 
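/*
 * The dentry timeout, in jiffies, lives in dentry->d_fsdata.  On 64-bit
 * architectures the value is stored directly in the pointer; on 32-bit
 * ones a small union is allocated per dentry (see fuse_dentry_init())
 * so the 64-bit time fits and can be freed via RCU.
 */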
37 #if BITS_PER_LONG >= 64
38 static inline void __fuse_dentry_settime(struct dentry *entry, u64 time)
39 {
40 	entry->d_fsdata = (void *) time;
41 }
42 
43 static inline u64 fuse_dentry_time(const struct dentry *entry)
44 {
45 	return (u64)entry->d_fsdata;
46 }
47 
48 #else
49 union fuse_dentry {
50 	u64 time;
51 	struct rcu_head rcu;
52 };
53 
54 static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time)
55 {
56 	((union fuse_dentry *) dentry->d_fsdata)->time = time;
57 }
58 
59 static inline u64 fuse_dentry_time(const struct dentry *entry)
60 {
61 	return ((union fuse_dentry *) entry->d_fsdata)->time;
62 }
63 #endif
64 
65 static void fuse_dentry_settime(struct dentry *dentry, u64 time)
66 {
67 	struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
68 	bool delete = !time && fc->delete_stale;
69 	/*
70 	 * Mess with DCACHE_OP_DELETE because dput() will be faster without it.
71 	 * Don't care about races; either way it's just an optimization.
72 	 */
73 	if ((!delete && (dentry->d_flags & DCACHE_OP_DELETE)) ||
74 	    (delete && !(dentry->d_flags & DCACHE_OP_DELETE))) {
75 		spin_lock(&dentry->d_lock);
76 		if (!delete)
77 			dentry->d_flags &= ~DCACHE_OP_DELETE;
78 		else
79 			dentry->d_flags |= DCACHE_OP_DELETE;
80 		spin_unlock(&dentry->d_lock);
81 	}
82 
83 	__fuse_dentry_settime(dentry, time);
84 }
85 
86 /*
87  * FUSE caches dentries and attributes with separate timeouts.  The
88  * time in jiffies until the dentry/attributes are valid is stored in
89  * dentry->d_fsdata and fuse_inode->i_time respectively.
90  */
91 
92 /*
93  * Calculate the time in jiffies until a dentry/attributes are valid
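 *
 * For example, a reply carrying entry_valid == 1 and
 * entry_valid_nsec == 500000000 keeps the entry valid for roughly
 * 1.5 seconds from now, expressed in jiffies.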
94  */
95 u64 fuse_time_to_jiffies(u64 sec, u32 nsec)
96 {
97 	if (sec || nsec) {
98 		struct timespec64 ts = {
99 			sec,
100 			min_t(u32, nsec, NSEC_PER_SEC - 1)
101 		};
102 
103 		return get_jiffies_64() + timespec64_to_jiffies(&ts);
104 	} else
105 		return 0;
106 }
107 
108 /*
109  * Set dentry and possibly attribute timeouts from the lookup/mk*
110  * replies
111  */
112 void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o)
113 {
114 	fuse_dentry_settime(entry,
115 		fuse_time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
116 }
117 
118 void fuse_invalidate_attr_mask(struct inode *inode, u32 mask)
119 {
120 	set_mask_bits(&get_fuse_inode(inode)->inval_mask, 0, mask);
121 }
122 
123 /*
124  * Mark the attributes as stale, so that at the next call to
125  * ->getattr() they will be fetched from userspace
126  */
127 void fuse_invalidate_attr(struct inode *inode)
128 {
129 	fuse_invalidate_attr_mask(inode, STATX_BASIC_STATS);
130 }
131 
132 static void fuse_dir_changed(struct inode *dir)
133 {
134 	fuse_invalidate_attr(dir);
135 	inode_maybe_inc_iversion(dir, false);
136 }
137 
138 /*
139  * Mark the attributes as stale due to an atime change.  Avoid the invalidate if
140  * atime is not used.
141  */
142 void fuse_invalidate_atime(struct inode *inode)
143 {
144 	if (!IS_RDONLY(inode))
145 		fuse_invalidate_attr_mask(inode, STATX_ATIME);
146 }
147 
148 /*
149  * Just mark the entry as stale, so that the next attempt to look it up
150  * will result in a new lookup call to userspace
151  *
152  * This is called when a dentry is about to become negative and the
153  * timeout is unknown (unlink, rmdir, rename and in some cases
154  * lookup)
155  */
156 void fuse_invalidate_entry_cache(struct dentry *entry)
157 {
158 	fuse_dentry_settime(entry, 0);
159 }
160 
161 /*
162  * Same as fuse_invalidate_entry_cache(), but also try to remove the
163  * dentry from the hash
164  */
165 static void fuse_invalidate_entry(struct dentry *entry)
166 {
167 	d_invalidate(entry);
168 	fuse_invalidate_entry_cache(entry);
169 }
170 
171 static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
172 			     u64 nodeid, const struct qstr *name,
173 			     struct fuse_entry_out *outarg)
174 {
175 	memset(outarg, 0, sizeof(struct fuse_entry_out));
176 	args->opcode = FUSE_LOOKUP;
177 	args->nodeid = nodeid;
178 	args->in_numargs = 1;
179 	args->in_args[0].size = name->len + 1;
180 	args->in_args[0].value = name->name;
181 	args->out_numargs = 1;
182 	args->out_args[0].size = sizeof(struct fuse_entry_out);
183 	args->out_args[0].value = outarg;
184 }
185 
186 /*
187  * Check whether the dentry is still valid
188  *
189  * If the entry validity timeout has expired and the dentry is
190  * positive, try to redo the lookup.  If the lookup results in a
191  * different inode, then let the VFS invalidate the dentry and redo
192  * the lookup once more.  If the lookup results in the same inode,
193  * then refresh the attributes, timeouts and mark the dentry valid.
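 *
 * In RCU-walk mode (LOOKUP_RCU) no request can be sent to userspace; if
 * one would be needed, -ECHILD is returned and the VFS retries in
 * ref-walk mode.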
194  */
195 static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
196 {
197 	struct inode *inode;
198 	struct dentry *parent;
199 	struct fuse_mount *fm;
200 	struct fuse_inode *fi;
201 	int ret;
202 
203 	inode = d_inode_rcu(entry);
204 	if (inode && fuse_is_bad(inode))
205 		goto invalid;
206 	else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
207 		 (flags & (LOOKUP_EXCL | LOOKUP_REVAL | LOOKUP_RENAME_TARGET))) {
208 		struct fuse_entry_out outarg;
209 		FUSE_ARGS(args);
210 		struct fuse_forget_link *forget;
211 		u64 attr_version;
212 
213 		/* For negative dentries, always do a fresh lookup */
214 		if (!inode)
215 			goto invalid;
216 
217 		ret = -ECHILD;
218 		if (flags & LOOKUP_RCU)
219 			goto out;
220 
221 		fm = get_fuse_mount(inode);
222 
223 		forget = fuse_alloc_forget();
224 		ret = -ENOMEM;
225 		if (!forget)
226 			goto out;
227 
228 		attr_version = fuse_get_attr_version(fm->fc);
229 
230 		parent = dget_parent(entry);
231 		fuse_lookup_init(fm->fc, &args, get_node_id(d_inode(parent)),
232 				 &entry->d_name, &outarg);
233 		ret = fuse_simple_request(fm, &args);
234 		dput(parent);
235 	/* Zero nodeid is the same as -ENOENT */
236 		if (!ret && !outarg.nodeid)
237 			ret = -ENOENT;
238 		if (!ret) {
239 			fi = get_fuse_inode(inode);
240 			if (outarg.nodeid != get_node_id(inode) ||
241 			    (bool) IS_AUTOMOUNT(inode) != (bool) (outarg.attr.flags & FUSE_ATTR_SUBMOUNT)) {
242 				fuse_queue_forget(fm->fc, forget,
243 						  outarg.nodeid, 1);
244 				goto invalid;
245 			}
246 			spin_lock(&fi->lock);
247 			fi->nlookup++;
248 			spin_unlock(&fi->lock);
249 		}
250 		kfree(forget);
251 		if (ret == -ENOMEM || ret == -EINTR)
252 			goto out;
253 		if (ret || fuse_invalid_attr(&outarg.attr) ||
254 		    fuse_stale_inode(inode, outarg.generation, &outarg.attr))
255 			goto invalid;
256 
257 		forget_all_cached_acls(inode);
258 		fuse_change_attributes(inode, &outarg.attr, NULL,
259 				       ATTR_TIMEOUT(&outarg),
260 				       attr_version);
261 		fuse_change_entry_timeout(entry, &outarg);
262 	} else if (inode) {
263 		fi = get_fuse_inode(inode);
264 		if (flags & LOOKUP_RCU) {
265 			if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
266 				return -ECHILD;
267 		} else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
268 			parent = dget_parent(entry);
269 			fuse_advise_use_readdirplus(d_inode(parent));
270 			dput(parent);
271 		}
272 	}
273 	ret = 1;
274 out:
275 	return ret;
276 
277 invalid:
278 	ret = 0;
279 	goto out;
280 }
281 
282 #if BITS_PER_LONG < 64
283 static int fuse_dentry_init(struct dentry *dentry)
284 {
285 	dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry),
286 				   GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);
287 
288 	return dentry->d_fsdata ? 0 : -ENOMEM;
289 }
290 static void fuse_dentry_release(struct dentry *dentry)
291 {
292 	union fuse_dentry *fd = dentry->d_fsdata;
293 
294 	kfree_rcu(fd, rcu);
295 }
296 #endif
297 
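/*
 * ->d_delete() callback: tell dput() to drop the dentry once its FUSE
 * timeout has expired.  fuse_dentry_settime() clears DCACHE_OP_DELETE
 * unless the entry is stale and the connection uses delete_stale, so
 * dput() skips this check in the common case.
 */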
298 static int fuse_dentry_delete(const struct dentry *dentry)
299 {
300 	return time_before64(fuse_dentry_time(dentry), get_jiffies_64());
301 }
302 
303 /*
304  * Create a fuse_mount object with a new superblock (with path->dentry
305  * as the root), and return that mount so it can be auto-mounted on
306  * @path.
307  */
308 static struct vfsmount *fuse_dentry_automount(struct path *path)
309 {
310 	struct fs_context *fsc;
311 	struct vfsmount *mnt;
312 	struct fuse_inode *mp_fi = get_fuse_inode(d_inode(path->dentry));
313 
314 	fsc = fs_context_for_submount(path->mnt->mnt_sb->s_type, path->dentry);
315 	if (IS_ERR(fsc))
316 		return ERR_CAST(fsc);
317 
318 	/* Pass the FUSE inode of the mount for fuse_get_tree_submount() */
319 	fsc->fs_private = mp_fi;
320 
321 	/* Create the submount */
322 	mnt = fc_mount(fsc);
323 	if (!IS_ERR(mnt))
324 		mntget(mnt);
325 
326 	put_fs_context(fsc);
327 	return mnt;
328 }
329 
330 const struct dentry_operations fuse_dentry_operations = {
331 	.d_revalidate	= fuse_dentry_revalidate,
332 	.d_delete	= fuse_dentry_delete,
333 #if BITS_PER_LONG < 64
334 	.d_init		= fuse_dentry_init,
335 	.d_release	= fuse_dentry_release,
336 #endif
337 	.d_automount	= fuse_dentry_automount,
338 };
339 
340 const struct dentry_operations fuse_root_dentry_operations = {
341 #if BITS_PER_LONG < 64
342 	.d_init		= fuse_dentry_init,
343 	.d_release	= fuse_dentry_release,
344 #endif
345 };
346 
347 int fuse_valid_type(int m)
348 {
349 	return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
350 		S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
351 }
352 
353 static bool fuse_valid_size(u64 size)
354 {
355 	return size <= LLONG_MAX;
356 }
357 
358 bool fuse_invalid_attr(struct fuse_attr *attr)
359 {
360 	return !fuse_valid_type(attr->mode) || !fuse_valid_size(attr->size);
361 }
362 
363 int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
364 		     struct fuse_entry_out *outarg, struct inode **inode)
365 {
366 	struct fuse_mount *fm = get_fuse_mount_super(sb);
367 	FUSE_ARGS(args);
368 	struct fuse_forget_link *forget;
369 	u64 attr_version, evict_ctr;
370 	int err;
371 
372 	*inode = NULL;
373 	err = -ENAMETOOLONG;
374 	if (name->len > FUSE_NAME_MAX)
375 		goto out;
376 
377 
378 	forget = fuse_alloc_forget();
379 	err = -ENOMEM;
380 	if (!forget)
381 		goto out;
382 
383 	attr_version = fuse_get_attr_version(fm->fc);
384 	evict_ctr = fuse_get_evict_ctr(fm->fc);
385 
386 	fuse_lookup_init(fm->fc, &args, nodeid, name, outarg);
387 	err = fuse_simple_request(fm, &args);
388 	/* Zero nodeid is the same as -ENOENT, but with a valid timeout */
389 	if (err || !outarg->nodeid)
390 		goto out_put_forget;
391 
392 	err = -EIO;
393 	if (fuse_invalid_attr(&outarg->attr))
394 		goto out_put_forget;
395 	if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) {
396 		pr_warn_once("root generation should be zero\n");
397 		outarg->generation = 0;
398 	}
399 
400 	*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
401 			   &outarg->attr, ATTR_TIMEOUT(outarg),
402 			   attr_version, evict_ctr);
403 	err = -ENOMEM;
404 	if (!*inode) {
405 		fuse_queue_forget(fm->fc, forget, outarg->nodeid, 1);
406 		goto out;
407 	}
408 	err = 0;
409 
410  out_put_forget:
411 	kfree(forget);
412  out:
413 	return err;
414 }
415 
416 static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
417 				  unsigned int flags)
418 {
419 	int err;
420 	struct fuse_entry_out outarg;
421 	struct inode *inode;
422 	struct dentry *newent;
423 	bool outarg_valid = true;
424 	bool locked;
425 
426 	if (fuse_is_bad(dir))
427 		return ERR_PTR(-EIO);
428 
429 	locked = fuse_lock_inode(dir);
430 	err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
431 			       &outarg, &inode);
432 	fuse_unlock_inode(dir, locked);
433 	if (err == -ENOENT) {
434 		outarg_valid = false;
435 		err = 0;
436 	}
437 	if (err)
438 		goto out_err;
439 
440 	err = -EIO;
441 	if (inode && get_node_id(inode) == FUSE_ROOT_ID)
442 		goto out_iput;
443 
444 	newent = d_splice_alias(inode, entry);
445 	err = PTR_ERR(newent);
446 	if (IS_ERR(newent))
447 		goto out_err;
448 
449 	entry = newent ? newent : entry;
450 	if (outarg_valid)
451 		fuse_change_entry_timeout(entry, &outarg);
452 	else
453 		fuse_invalidate_entry_cache(entry);
454 
455 	if (inode)
456 		fuse_advise_use_readdirplus(dir);
457 	return newent;
458 
459  out_iput:
460 	iput(inode);
461  out_err:
462 	return ERR_PTR(err);
463 }
464 
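/*
 * Build the security-context extension for a create request: a
 * fuse_secctx_header followed by a single fuse_secctx record, the
 * NUL-terminated xattr name and the LSM context value, padded to
 * FUSE_REC_ALIGN.
 */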
465 static int get_security_context(struct dentry *entry, umode_t mode,
466 				struct fuse_in_arg *ext)
467 {
468 	struct fuse_secctx *fctx;
469 	struct fuse_secctx_header *header;
470 	struct lsm_context lsmctx = { };
471 	void *ptr;
472 	u32 total_len = sizeof(*header);
473 	int err, nr_ctx = 0;
474 	const char *name = NULL;
475 	size_t namelen;
476 
477 	err = security_dentry_init_security(entry, mode, &entry->d_name,
478 					    &name, &lsmctx);
479 
480 	/* If no LSM supports this security hook, ignore the error */
481 	if (err && err != -EOPNOTSUPP)
482 		goto out_err;
483 
484 	if (lsmctx.len) {
485 		nr_ctx = 1;
486 		namelen = strlen(name) + 1;
487 		err = -EIO;
488 		if (WARN_ON(namelen > XATTR_NAME_MAX + 1 ||
489 		    lsmctx.len > S32_MAX))
490 			goto out_err;
491 		total_len += FUSE_REC_ALIGN(sizeof(*fctx) + namelen +
492 					    lsmctx.len);
493 	}
494 
495 	err = -ENOMEM;
496 	header = ptr = kzalloc(total_len, GFP_KERNEL);
497 	if (!ptr)
498 		goto out_err;
499 
500 	header->nr_secctx = nr_ctx;
501 	header->size = total_len;
502 	ptr += sizeof(*header);
503 	if (nr_ctx) {
504 		fctx = ptr;
505 		fctx->size = lsmctx.len;
506 		ptr += sizeof(*fctx);
507 
508 		strcpy(ptr, name);
509 		ptr += namelen;
510 
511 		memcpy(ptr, lsmctx.context, lsmctx.len);
512 	}
513 	ext->size = total_len;
514 	ext->value = header;
515 	err = 0;
516 out_err:
517 	if (nr_ctx)
518 		security_release_secctx(&lsmctx);
519 	return err;
520 }
521 
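/*
 * Grow an extension argument buffer by @bytes, zeroing the new space,
 * and return a pointer to the added area.  On allocation failure the
 * old buffer is freed and the argument reset, so the caller only needs
 * to report -ENOMEM.
 */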
522 static void *extend_arg(struct fuse_in_arg *buf, u32 bytes)
523 {
524 	void *p;
525 	u32 newlen = buf->size + bytes;
526 
527 	p = krealloc(buf->value, newlen, GFP_KERNEL);
528 	if (!p) {
529 		kfree(buf->value);
530 		buf->size = 0;
531 		buf->value = NULL;
532 		return NULL;
533 	}
534 
535 	memset(p + buf->size, 0, bytes);
536 	buf->value = p;
537 	buf->size = newlen;
538 
539 	return p + newlen - bytes;
540 }
541 
542 static u32 fuse_ext_size(size_t size)
543 {
544 	return FUSE_REC_ALIGN(sizeof(struct fuse_ext_header) + size);
545 }
546 
547 /*
548  * This adds just a single supplementary group that matches the parent's group.
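 *
 * The group is only sent when the parent directory's group differs from
 * the caller's fsgid and the caller is a member of that group, which is
 * typically the setgid-directory style group-inheritance case.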
549  */
550 static int get_create_supp_group(struct mnt_idmap *idmap,
551 				 struct inode *dir,
552 				 struct fuse_in_arg *ext)
553 {
554 	struct fuse_conn *fc = get_fuse_conn(dir);
555 	struct fuse_ext_header *xh;
556 	struct fuse_supp_groups *sg;
557 	kgid_t kgid = dir->i_gid;
558 	vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns, kgid);
559 	gid_t parent_gid = from_kgid(fc->user_ns, kgid);
560 
561 	u32 sg_len = fuse_ext_size(sizeof(*sg) + sizeof(sg->groups[0]));
562 
563 	if (parent_gid == (gid_t) -1 || vfsgid_eq_kgid(vfsgid, current_fsgid()) ||
564 	    !vfsgid_in_group_p(vfsgid))
565 		return 0;
566 
567 	xh = extend_arg(ext, sg_len);
568 	if (!xh)
569 		return -ENOMEM;
570 
571 	xh->size = sg_len;
572 	xh->type = FUSE_EXT_GROUPS;
573 
574 	sg = (struct fuse_supp_groups *) &xh[1];
575 	sg->nr_groups = 1;
576 	sg->groups[0] = parent_gid;
577 
578 	return 0;
579 }
580 
581 static int get_create_ext(struct mnt_idmap *idmap,
582 			  struct fuse_args *args,
583 			  struct inode *dir, struct dentry *dentry,
584 			  umode_t mode)
585 {
586 	struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
587 	struct fuse_in_arg ext = { .size = 0, .value = NULL };
588 	int err = 0;
589 
590 	if (fc->init_security)
591 		err = get_security_context(dentry, mode, &ext);
592 	if (!err && fc->create_supp_group)
593 		err = get_create_supp_group(idmap, dir, &ext);
594 
595 	if (!err && ext.size) {
596 		WARN_ON(args->in_numargs >= ARRAY_SIZE(args->in_args));
597 		args->is_ext = true;
598 		args->ext_idx = args->in_numargs++;
599 		args->in_args[args->ext_idx] = ext;
600 	} else {
601 		kfree(ext.value);
602 	}
603 
604 	return err;
605 }
606 
607 static void free_ext_value(struct fuse_args *args)
608 {
609 	if (args->is_ext)
610 		kfree(args->in_args[args->ext_idx].value);
611 }
612 
613 /*
614  * Atomic create+open operation
615  *
616  * If the filesystem doesn't support this, then fall back to separate
617  * 'mknod' + 'open' requests.
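 *
 * (An -ENOSYS reply to FUSE_CREATE makes fuse_atomic_open() set
 * fc->no_create and use the fuse_mknod() fallback from then on.)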
618  */
619 static int fuse_create_open(struct mnt_idmap *idmap, struct inode *dir,
620 			    struct dentry *entry, struct file *file,
621 			    unsigned int flags, umode_t mode, u32 opcode)
622 {
623 	int err;
624 	struct inode *inode;
625 	struct fuse_mount *fm = get_fuse_mount(dir);
626 	FUSE_ARGS(args);
627 	struct fuse_forget_link *forget;
628 	struct fuse_create_in inarg;
629 	struct fuse_open_out *outopenp;
630 	struct fuse_entry_out outentry;
631 	struct fuse_inode *fi;
632 	struct fuse_file *ff;
633 	bool trunc = flags & O_TRUNC;
634 
635 	/* Userspace expects S_IFREG in create mode */
636 	BUG_ON((mode & S_IFMT) != S_IFREG);
637 
638 	forget = fuse_alloc_forget();
639 	err = -ENOMEM;
640 	if (!forget)
641 		goto out_err;
642 
643 	err = -ENOMEM;
644 	ff = fuse_file_alloc(fm, true);
645 	if (!ff)
646 		goto out_put_forget_req;
647 
648 	if (!fm->fc->dont_mask)
649 		mode &= ~current_umask();
650 
651 	flags &= ~O_NOCTTY;
652 	memset(&inarg, 0, sizeof(inarg));
653 	memset(&outentry, 0, sizeof(outentry));
654 	inarg.flags = flags;
655 	inarg.mode = mode;
656 	inarg.umask = current_umask();
657 
658 	if (fm->fc->handle_killpriv_v2 && trunc &&
659 	    !(flags & O_EXCL) && !capable(CAP_FSETID)) {
660 		inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
661 	}
662 
663 	args.opcode = opcode;
664 	args.nodeid = get_node_id(dir);
665 	args.in_numargs = 2;
666 	args.in_args[0].size = sizeof(inarg);
667 	args.in_args[0].value = &inarg;
668 	args.in_args[1].size = entry->d_name.len + 1;
669 	args.in_args[1].value = entry->d_name.name;
670 	args.out_numargs = 2;
671 	args.out_args[0].size = sizeof(outentry);
672 	args.out_args[0].value = &outentry;
673 	/* Store outarg for fuse_finish_open() */
674 	outopenp = &ff->args->open_outarg;
675 	args.out_args[1].size = sizeof(*outopenp);
676 	args.out_args[1].value = outopenp;
677 
678 	err = get_create_ext(idmap, &args, dir, entry, mode);
679 	if (err)
680 		goto out_free_ff;
681 
682 	err = fuse_simple_idmap_request(idmap, fm, &args);
683 	free_ext_value(&args);
684 	if (err)
685 		goto out_free_ff;
686 
687 	err = -EIO;
688 	if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) ||
689 	    fuse_invalid_attr(&outentry.attr))
690 		goto out_free_ff;
691 
692 	ff->fh = outopenp->fh;
693 	ff->nodeid = outentry.nodeid;
694 	ff->open_flags = outopenp->open_flags;
695 	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
696 			  &outentry.attr, ATTR_TIMEOUT(&outentry), 0, 0);
697 	if (!inode) {
698 		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
699 		fuse_sync_release(NULL, ff, flags);
700 		fuse_queue_forget(fm->fc, forget, outentry.nodeid, 1);
701 		err = -ENOMEM;
702 		goto out_err;
703 	}
704 	kfree(forget);
705 	d_instantiate(entry, inode);
706 	fuse_change_entry_timeout(entry, &outentry);
707 	fuse_dir_changed(dir);
708 	err = generic_file_open(inode, file);
709 	if (!err) {
710 		file->private_data = ff;
711 		err = finish_open(file, entry, fuse_finish_open);
712 	}
713 	if (err) {
714 		fi = get_fuse_inode(inode);
715 		fuse_sync_release(fi, ff, flags);
716 	} else {
717 		if (fm->fc->atomic_o_trunc && trunc)
718 			truncate_pagecache(inode, 0);
719 		else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
720 			invalidate_inode_pages2(inode->i_mapping);
721 	}
722 	return err;
723 
724 out_free_ff:
725 	fuse_file_free(ff);
726 out_put_forget_req:
727 	kfree(forget);
728 out_err:
729 	return err;
730 }
731 
732 static int fuse_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
733 		      umode_t, dev_t);
734 static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
735 			    struct file *file, unsigned flags,
736 			    umode_t mode)
737 {
738 	int err;
739 	struct mnt_idmap *idmap = file_mnt_idmap(file);
740 	struct fuse_conn *fc = get_fuse_conn(dir);
741 	struct dentry *res = NULL;
742 
743 	if (fuse_is_bad(dir))
744 		return -EIO;
745 
746 	if (d_in_lookup(entry)) {
747 		res = fuse_lookup(dir, entry, 0);
748 		if (IS_ERR(res))
749 			return PTR_ERR(res);
750 
751 		if (res)
752 			entry = res;
753 	}
754 
755 	if (!(flags & O_CREAT) || d_really_is_positive(entry))
756 		goto no_open;
757 
758 	/* Only creates */
759 	file->f_mode |= FMODE_CREATED;
760 
761 	if (fc->no_create)
762 		goto mknod;
763 
764 	err = fuse_create_open(idmap, dir, entry, file, flags, mode, FUSE_CREATE);
765 	if (err == -ENOSYS) {
766 		fc->no_create = 1;
767 		goto mknod;
768 	} else if (err == -EEXIST)
769 		fuse_invalidate_entry(entry);
770 out_dput:
771 	dput(res);
772 	return err;
773 
774 mknod:
775 	err = fuse_mknod(idmap, dir, entry, mode, 0);
776 	if (err)
777 		goto out_dput;
778 no_open:
779 	return finish_no_open(file, res);
780 }
781 
782 /*
783  * Code shared between mknod, mkdir, symlink and link
784  */
785 static int create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
786 			    struct fuse_args *args, struct inode *dir,
787 			    struct dentry *entry, umode_t mode)
788 {
789 	struct fuse_entry_out outarg;
790 	struct inode *inode;
791 	struct dentry *d;
792 	int err;
793 	struct fuse_forget_link *forget;
794 
795 	if (fuse_is_bad(dir))
796 		return -EIO;
797 
798 	forget = fuse_alloc_forget();
799 	if (!forget)
800 		return -ENOMEM;
801 
802 	memset(&outarg, 0, sizeof(outarg));
803 	args->nodeid = get_node_id(dir);
804 	args->out_numargs = 1;
805 	args->out_args[0].size = sizeof(outarg);
806 	args->out_args[0].value = &outarg;
807 
808 	if (args->opcode != FUSE_LINK) {
809 		err = get_create_ext(idmap, args, dir, entry, mode);
810 		if (err)
811 			goto out_put_forget_req;
812 	}
813 
814 	err = fuse_simple_idmap_request(idmap, fm, args);
815 	free_ext_value(args);
816 	if (err)
817 		goto out_put_forget_req;
818 
819 	err = -EIO;
820 	if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr))
821 		goto out_put_forget_req;
822 
823 	if ((outarg.attr.mode ^ mode) & S_IFMT)
824 		goto out_put_forget_req;
825 
826 	inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
827 			  &outarg.attr, ATTR_TIMEOUT(&outarg), 0, 0);
828 	if (!inode) {
829 		fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1);
830 		return -ENOMEM;
831 	}
832 	kfree(forget);
833 
834 	d_drop(entry);
835 	d = d_splice_alias(inode, entry);
836 	if (IS_ERR(d))
837 		return PTR_ERR(d);
838 
839 	if (d) {
840 		fuse_change_entry_timeout(d, &outarg);
841 		dput(d);
842 	} else {
843 		fuse_change_entry_timeout(entry, &outarg);
844 	}
845 	fuse_dir_changed(dir);
846 	return 0;
847 
848  out_put_forget_req:
849 	if (err == -EEXIST)
850 		fuse_invalidate_entry(entry);
851 	kfree(forget);
852 	return err;
853 }
854 
855 static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir,
856 		      struct dentry *entry, umode_t mode, dev_t rdev)
857 {
858 	struct fuse_mknod_in inarg;
859 	struct fuse_mount *fm = get_fuse_mount(dir);
860 	FUSE_ARGS(args);
861 
862 	if (!fm->fc->dont_mask)
863 		mode &= ~current_umask();
864 
865 	memset(&inarg, 0, sizeof(inarg));
866 	inarg.mode = mode;
867 	inarg.rdev = new_encode_dev(rdev);
868 	inarg.umask = current_umask();
869 	args.opcode = FUSE_MKNOD;
870 	args.in_numargs = 2;
871 	args.in_args[0].size = sizeof(inarg);
872 	args.in_args[0].value = &inarg;
873 	args.in_args[1].size = entry->d_name.len + 1;
874 	args.in_args[1].value = entry->d_name.name;
875 	return create_new_entry(idmap, fm, &args, dir, entry, mode);
876 }
877 
878 static int fuse_create(struct mnt_idmap *idmap, struct inode *dir,
879 		       struct dentry *entry, umode_t mode, bool excl)
880 {
881 	return fuse_mknod(idmap, dir, entry, mode, 0);
882 }
883 
884 static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
885 			struct file *file, umode_t mode)
886 {
887 	struct fuse_conn *fc = get_fuse_conn(dir);
888 	int err;
889 
890 	if (fc->no_tmpfile)
891 		return -EOPNOTSUPP;
892 
893 	err = fuse_create_open(idmap, dir, file->f_path.dentry, file,
894 			       file->f_flags, mode, FUSE_TMPFILE);
895 	if (err == -ENOSYS) {
896 		fc->no_tmpfile = 1;
897 		err = -EOPNOTSUPP;
898 	}
899 	return err;
900 }
901 
902 static int fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir,
903 		      struct dentry *entry, umode_t mode)
904 {
905 	struct fuse_mkdir_in inarg;
906 	struct fuse_mount *fm = get_fuse_mount(dir);
907 	FUSE_ARGS(args);
908 
909 	if (!fm->fc->dont_mask)
910 		mode &= ~current_umask();
911 
912 	memset(&inarg, 0, sizeof(inarg));
913 	inarg.mode = mode;
914 	inarg.umask = current_umask();
915 	args.opcode = FUSE_MKDIR;
916 	args.in_numargs = 2;
917 	args.in_args[0].size = sizeof(inarg);
918 	args.in_args[0].value = &inarg;
919 	args.in_args[1].size = entry->d_name.len + 1;
920 	args.in_args[1].value = entry->d_name.name;
921 	return create_new_entry(idmap, fm, &args, dir, entry, S_IFDIR);
922 }
923 
924 static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
925 			struct dentry *entry, const char *link)
926 {
927 	struct fuse_mount *fm = get_fuse_mount(dir);
928 	unsigned len = strlen(link) + 1;
929 	FUSE_ARGS(args);
930 
931 	args.opcode = FUSE_SYMLINK;
932 	args.in_numargs = 2;
933 	args.in_args[0].size = entry->d_name.len + 1;
934 	args.in_args[0].value = entry->d_name.name;
935 	args.in_args[1].size = len;
936 	args.in_args[1].value = link;
937 	return create_new_entry(idmap, fm, &args, dir, entry, S_IFLNK);
938 }
939 
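/*
 * Write back the inode's metadata so that timestamps updated only in
 * the local cache (see fuse_update_ctime_in_cache()) reach the server.
 */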
940 void fuse_flush_time_update(struct inode *inode)
941 {
942 	int err = sync_inode_metadata(inode, 1);
943 
944 	mapping_set_error(inode->i_mapping, err);
945 }
946 
947 static void fuse_update_ctime_in_cache(struct inode *inode)
948 {
949 	if (!IS_NOCMTIME(inode)) {
950 		inode_set_ctime_current(inode);
951 		mark_inode_dirty_sync(inode);
952 		fuse_flush_time_update(inode);
953 	}
954 }
955 
956 void fuse_update_ctime(struct inode *inode)
957 {
958 	fuse_invalidate_attr_mask(inode, STATX_CTIME);
959 	fuse_update_ctime_in_cache(inode);
960 }
961 
962 static void fuse_entry_unlinked(struct dentry *entry)
963 {
964 	struct inode *inode = d_inode(entry);
965 	struct fuse_conn *fc = get_fuse_conn(inode);
966 	struct fuse_inode *fi = get_fuse_inode(inode);
967 
968 	spin_lock(&fi->lock);
969 	fi->attr_version = atomic64_inc_return(&fc->attr_version);
970 	/*
971 	 * If i_nlink == 0 then unlink doesn't make sense, yet this can
972 	 * happen if the userspace filesystem is careless.  It would be
973 	 * difficult to enforce correct nlink usage so just ignore this
974 	 * condition here
975 	 */
976 	if (S_ISDIR(inode->i_mode))
977 		clear_nlink(inode);
978 	else if (inode->i_nlink > 0)
979 		drop_nlink(inode);
980 	spin_unlock(&fi->lock);
981 	fuse_invalidate_entry_cache(entry);
982 	fuse_update_ctime(inode);
983 }
984 
985 static int fuse_unlink(struct inode *dir, struct dentry *entry)
986 {
987 	int err;
988 	struct fuse_mount *fm = get_fuse_mount(dir);
989 	FUSE_ARGS(args);
990 
991 	if (fuse_is_bad(dir))
992 		return -EIO;
993 
994 	args.opcode = FUSE_UNLINK;
995 	args.nodeid = get_node_id(dir);
996 	args.in_numargs = 1;
997 	args.in_args[0].size = entry->d_name.len + 1;
998 	args.in_args[0].value = entry->d_name.name;
999 	err = fuse_simple_request(fm, &args);
1000 	if (!err) {
1001 		fuse_dir_changed(dir);
1002 		fuse_entry_unlinked(entry);
1003 	} else if (err == -EINTR || err == -ENOENT)
1004 		fuse_invalidate_entry(entry);
1005 	return err;
1006 }
1007 
1008 static int fuse_rmdir(struct inode *dir, struct dentry *entry)
1009 {
1010 	int err;
1011 	struct fuse_mount *fm = get_fuse_mount(dir);
1012 	FUSE_ARGS(args);
1013 
1014 	if (fuse_is_bad(dir))
1015 		return -EIO;
1016 
1017 	args.opcode = FUSE_RMDIR;
1018 	args.nodeid = get_node_id(dir);
1019 	args.in_numargs = 1;
1020 	args.in_args[0].size = entry->d_name.len + 1;
1021 	args.in_args[0].value = entry->d_name.name;
1022 	err = fuse_simple_request(fm, &args);
1023 	if (!err) {
1024 		fuse_dir_changed(dir);
1025 		fuse_entry_unlinked(entry);
1026 	} else if (err == -EINTR || err == -ENOENT)
1027 		fuse_invalidate_entry(entry);
1028 	return err;
1029 }
1030 
1031 static int fuse_rename_common(struct mnt_idmap *idmap, struct inode *olddir, struct dentry *oldent,
1032 			      struct inode *newdir, struct dentry *newent,
1033 			      unsigned int flags, int opcode, size_t argsize)
1034 {
1035 	int err;
1036 	struct fuse_rename2_in inarg;
1037 	struct fuse_mount *fm = get_fuse_mount(olddir);
1038 	FUSE_ARGS(args);
1039 
1040 	memset(&inarg, 0, argsize);
1041 	inarg.newdir = get_node_id(newdir);
1042 	inarg.flags = flags;
1043 	args.opcode = opcode;
1044 	args.nodeid = get_node_id(olddir);
1045 	args.in_numargs = 3;
1046 	args.in_args[0].size = argsize;
1047 	args.in_args[0].value = &inarg;
1048 	args.in_args[1].size = oldent->d_name.len + 1;
1049 	args.in_args[1].value = oldent->d_name.name;
1050 	args.in_args[2].size = newent->d_name.len + 1;
1051 	args.in_args[2].value = newent->d_name.name;
1052 	err = fuse_simple_idmap_request(idmap, fm, &args);
1053 	if (!err) {
1054 		/* ctime changes */
1055 		fuse_update_ctime(d_inode(oldent));
1056 
1057 		if (flags & RENAME_EXCHANGE)
1058 			fuse_update_ctime(d_inode(newent));
1059 
1060 		fuse_dir_changed(olddir);
1061 		if (olddir != newdir)
1062 			fuse_dir_changed(newdir);
1063 
1064 		/* newent will end up negative */
1065 		if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent))
1066 			fuse_entry_unlinked(newent);
1067 	} else if (err == -EINTR || err == -ENOENT) {
1068 		/* If the request was interrupted, DEITY only knows if the
1069 		   rename actually took place.  If the invalidation
1070 		   fails (e.g. some process has CWD under the renamed
1071 		   directory), then there can be inconsistency between
1072 		   the dcache and the real filesystem.  Tough luck. */
1073 		fuse_invalidate_entry(oldent);
1074 		if (d_really_is_positive(newent))
1075 			fuse_invalidate_entry(newent);
1076 	}
1077 
1078 	return err;
1079 }
1080 
1081 static int fuse_rename2(struct mnt_idmap *idmap, struct inode *olddir,
1082 			struct dentry *oldent, struct inode *newdir,
1083 			struct dentry *newent, unsigned int flags)
1084 {
1085 	struct fuse_conn *fc = get_fuse_conn(olddir);
1086 	int err;
1087 
1088 	if (fuse_is_bad(olddir))
1089 		return -EIO;
1090 
1091 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
1092 		return -EINVAL;
1093 
1094 	if (flags) {
1095 		if (fc->no_rename2 || fc->minor < 23)
1096 			return -EINVAL;
1097 
1098 		err = fuse_rename_common((flags & RENAME_WHITEOUT) ? idmap : &invalid_mnt_idmap,
1099 					 olddir, oldent, newdir, newent, flags,
1100 					 FUSE_RENAME2,
1101 					 sizeof(struct fuse_rename2_in));
1102 		if (err == -ENOSYS) {
1103 			fc->no_rename2 = 1;
1104 			err = -EINVAL;
1105 		}
1106 	} else {
1107 		err = fuse_rename_common(&invalid_mnt_idmap, olddir, oldent, newdir, newent, 0,
1108 					 FUSE_RENAME,
1109 					 sizeof(struct fuse_rename_in));
1110 	}
1111 
1112 	return err;
1113 }
1114 
1115 static int fuse_link(struct dentry *entry, struct inode *newdir,
1116 		     struct dentry *newent)
1117 {
1118 	int err;
1119 	struct fuse_link_in inarg;
1120 	struct inode *inode = d_inode(entry);
1121 	struct fuse_mount *fm = get_fuse_mount(inode);
1122 	FUSE_ARGS(args);
1123 
1124 	memset(&inarg, 0, sizeof(inarg));
1125 	inarg.oldnodeid = get_node_id(inode);
1126 	args.opcode = FUSE_LINK;
1127 	args.in_numargs = 2;
1128 	args.in_args[0].size = sizeof(inarg);
1129 	args.in_args[0].value = &inarg;
1130 	args.in_args[1].size = newent->d_name.len + 1;
1131 	args.in_args[1].value = newent->d_name.name;
1132 	err = create_new_entry(&invalid_mnt_idmap, fm, &args, newdir, newent, inode->i_mode);
1133 	if (!err)
1134 		fuse_update_ctime_in_cache(inode);
1135 	else if (err == -EINTR)
1136 		fuse_invalidate_attr(inode);
1137 
1138 	return err;
1139 }
1140 
1141 static void fuse_fillattr(struct mnt_idmap *idmap, struct inode *inode,
1142 			  struct fuse_attr *attr, struct kstat *stat)
1143 {
1144 	unsigned int blkbits;
1145 	struct fuse_conn *fc = get_fuse_conn(inode);
1146 	vfsuid_t vfsuid = make_vfsuid(idmap, fc->user_ns,
1147 				      make_kuid(fc->user_ns, attr->uid));
1148 	vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns,
1149 				      make_kgid(fc->user_ns, attr->gid));
1150 
1151 	stat->dev = inode->i_sb->s_dev;
1152 	stat->ino = attr->ino;
1153 	stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
1154 	stat->nlink = attr->nlink;
1155 	stat->uid = vfsuid_into_kuid(vfsuid);
1156 	stat->gid = vfsgid_into_kgid(vfsgid);
1157 	stat->rdev = inode->i_rdev;
1158 	stat->atime.tv_sec = attr->atime;
1159 	stat->atime.tv_nsec = attr->atimensec;
1160 	stat->mtime.tv_sec = attr->mtime;
1161 	stat->mtime.tv_nsec = attr->mtimensec;
1162 	stat->ctime.tv_sec = attr->ctime;
1163 	stat->ctime.tv_nsec = attr->ctimensec;
1164 	stat->size = attr->size;
1165 	stat->blocks = attr->blocks;
1166 
1167 	if (attr->blksize != 0)
1168 		blkbits = ilog2(attr->blksize);
1169 	else
1170 		blkbits = inode->i_sb->s_blocksize_bits;
1171 
1172 	stat->blksize = 1 << blkbits;
1173 }
1174 
1175 static void fuse_statx_to_attr(struct fuse_statx *sx, struct fuse_attr *attr)
1176 {
1177 	memset(attr, 0, sizeof(*attr));
1178 	attr->ino = sx->ino;
1179 	attr->size = sx->size;
1180 	attr->blocks = sx->blocks;
1181 	attr->atime = sx->atime.tv_sec;
1182 	attr->mtime = sx->mtime.tv_sec;
1183 	attr->ctime = sx->ctime.tv_sec;
1184 	attr->atimensec = sx->atime.tv_nsec;
1185 	attr->mtimensec = sx->mtime.tv_nsec;
1186 	attr->ctimensec = sx->ctime.tv_nsec;
1187 	attr->mode = sx->mode;
1188 	attr->nlink = sx->nlink;
1189 	attr->uid = sx->uid;
1190 	attr->gid = sx->gid;
1191 	attr->rdev = new_encode_dev(MKDEV(sx->rdev_major, sx->rdev_minor));
1192 	attr->blksize = sx->blksize;
1193 }
1194 
1195 static int fuse_do_statx(struct mnt_idmap *idmap, struct inode *inode,
1196 			 struct file *file, struct kstat *stat)
1197 {
1198 	int err;
1199 	struct fuse_attr attr;
1200 	struct fuse_statx *sx;
1201 	struct fuse_statx_in inarg;
1202 	struct fuse_statx_out outarg;
1203 	struct fuse_mount *fm = get_fuse_mount(inode);
1204 	u64 attr_version = fuse_get_attr_version(fm->fc);
1205 	FUSE_ARGS(args);
1206 
1207 	memset(&inarg, 0, sizeof(inarg));
1208 	memset(&outarg, 0, sizeof(outarg));
1209 	/* Directories have separate file-handle space */
1210 	if (file && S_ISREG(inode->i_mode)) {
1211 		struct fuse_file *ff = file->private_data;
1212 
1213 		inarg.getattr_flags |= FUSE_GETATTR_FH;
1214 		inarg.fh = ff->fh;
1215 	}
1216 	/* For now leave sync hints as the default, request all stats. */
1217 	inarg.sx_flags = 0;
1218 	inarg.sx_mask = STATX_BASIC_STATS | STATX_BTIME;
1219 	args.opcode = FUSE_STATX;
1220 	args.nodeid = get_node_id(inode);
1221 	args.in_numargs = 1;
1222 	args.in_args[0].size = sizeof(inarg);
1223 	args.in_args[0].value = &inarg;
1224 	args.out_numargs = 1;
1225 	args.out_args[0].size = sizeof(outarg);
1226 	args.out_args[0].value = &outarg;
1227 	err = fuse_simple_request(fm, &args);
1228 	if (err)
1229 		return err;
1230 
1231 	sx = &outarg.stat;
1232 	if (((sx->mask & STATX_SIZE) && !fuse_valid_size(sx->size)) ||
1233 	    ((sx->mask & STATX_TYPE) && (!fuse_valid_type(sx->mode) ||
1234 					 inode_wrong_type(inode, sx->mode)))) {
1235 		fuse_make_bad(inode);
1236 		return -EIO;
1237 	}
1238 
1239 	fuse_statx_to_attr(&outarg.stat, &attr);
1240 	if ((sx->mask & STATX_BASIC_STATS) == STATX_BASIC_STATS) {
1241 		fuse_change_attributes(inode, &attr, &outarg.stat,
1242 				       ATTR_TIMEOUT(&outarg), attr_version);
1243 	}
1244 
1245 	if (stat) {
1246 		stat->result_mask = sx->mask & (STATX_BASIC_STATS | STATX_BTIME);
1247 		stat->btime.tv_sec = sx->btime.tv_sec;
1248 		stat->btime.tv_nsec = min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);
1249 		fuse_fillattr(idmap, inode, &attr, stat);
1250 		stat->result_mask |= STATX_TYPE;
1251 	}
1252 
1253 	return 0;
1254 }
1255 
1256 static int fuse_do_getattr(struct mnt_idmap *idmap, struct inode *inode,
1257 			   struct kstat *stat, struct file *file)
1258 {
1259 	int err;
1260 	struct fuse_getattr_in inarg;
1261 	struct fuse_attr_out outarg;
1262 	struct fuse_mount *fm = get_fuse_mount(inode);
1263 	FUSE_ARGS(args);
1264 	u64 attr_version;
1265 
1266 	attr_version = fuse_get_attr_version(fm->fc);
1267 
1268 	memset(&inarg, 0, sizeof(inarg));
1269 	memset(&outarg, 0, sizeof(outarg));
1270 	/* Directories have separate file-handle space */
1271 	if (file && S_ISREG(inode->i_mode)) {
1272 		struct fuse_file *ff = file->private_data;
1273 
1274 		inarg.getattr_flags |= FUSE_GETATTR_FH;
1275 		inarg.fh = ff->fh;
1276 	}
1277 	args.opcode = FUSE_GETATTR;
1278 	args.nodeid = get_node_id(inode);
1279 	args.in_numargs = 1;
1280 	args.in_args[0].size = sizeof(inarg);
1281 	args.in_args[0].value = &inarg;
1282 	args.out_numargs = 1;
1283 	args.out_args[0].size = sizeof(outarg);
1284 	args.out_args[0].value = &outarg;
1285 	err = fuse_simple_request(fm, &args);
1286 	if (!err) {
1287 		if (fuse_invalid_attr(&outarg.attr) ||
1288 		    inode_wrong_type(inode, outarg.attr.mode)) {
1289 			fuse_make_bad(inode);
1290 			err = -EIO;
1291 		} else {
1292 			fuse_change_attributes(inode, &outarg.attr, NULL,
1293 					       ATTR_TIMEOUT(&outarg),
1294 					       attr_version);
1295 			if (stat)
1296 				fuse_fillattr(idmap, inode, &outarg.attr, stat);
1297 		}
1298 	}
1299 	return err;
1300 }
1301 
1302 static int fuse_update_get_attr(struct mnt_idmap *idmap, struct inode *inode,
1303 				struct file *file, struct kstat *stat,
1304 				u32 request_mask, unsigned int flags)
1305 {
1306 	struct fuse_inode *fi = get_fuse_inode(inode);
1307 	struct fuse_conn *fc = get_fuse_conn(inode);
1308 	int err = 0;
1309 	bool sync;
1310 	u32 inval_mask = READ_ONCE(fi->inval_mask);
1311 	u32 cache_mask = fuse_get_cache_mask(inode);
1312 
1313 
1314 	/* FUSE only supports basic stats and possibly btime */
1315 	request_mask &= STATX_BASIC_STATS | STATX_BTIME;
1316 retry:
1317 	if (fc->no_statx)
1318 		request_mask &= STATX_BASIC_STATS;
1319 
1320 	if (!request_mask)
1321 		sync = false;
1322 	else if (flags & AT_STATX_FORCE_SYNC)
1323 		sync = true;
1324 	else if (flags & AT_STATX_DONT_SYNC)
1325 		sync = false;
1326 	else if (request_mask & inval_mask & ~cache_mask)
1327 		sync = true;
1328 	else
1329 		sync = time_before64(fi->i_time, get_jiffies_64());
1330 
1331 	if (sync) {
1332 		forget_all_cached_acls(inode);
1333 		/* Try statx if BTIME is requested */
1334 		if (!fc->no_statx && (request_mask & ~STATX_BASIC_STATS)) {
1335 			err = fuse_do_statx(idmap, inode, file, stat);
1336 			if (err == -ENOSYS) {
1337 				fc->no_statx = 1;
1338 				err = 0;
1339 				goto retry;
1340 			}
1341 		} else {
1342 			err = fuse_do_getattr(idmap, inode, stat, file);
1343 		}
1344 	} else if (stat) {
1345 		generic_fillattr(idmap, request_mask, inode, stat);
1346 		stat->mode = fi->orig_i_mode;
1347 		stat->ino = fi->orig_ino;
1348 		if (test_bit(FUSE_I_BTIME, &fi->state)) {
1349 			stat->btime = fi->i_btime;
1350 			stat->result_mask |= STATX_BTIME;
1351 		}
1352 	}
1353 
1354 	return err;
1355 }
1356 
1357 int fuse_update_attributes(struct inode *inode, struct file *file, u32 mask)
1358 {
1359 	return fuse_update_get_attr(&nop_mnt_idmap, inode, file, NULL, mask, 0);
1360 }
1361 
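/*
 * Handle a server-initiated ("reverse") invalidation of a directory
 * entry: look the name up under @parent_nodeid and invalidate the
 * dentry (only expiring it if FUSE_EXPIRE_ONLY is set).  If
 * @child_nodeid is given and matches, the entry is additionally
 * unhashed and deleted much as unlink/rmdir would do.
 */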
1362 int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
1363 			     u64 child_nodeid, struct qstr *name, u32 flags)
1364 {
1365 	int err = -ENOTDIR;
1366 	struct inode *parent;
1367 	struct dentry *dir;
1368 	struct dentry *entry;
1369 
1370 	parent = fuse_ilookup(fc, parent_nodeid, NULL);
1371 	if (!parent)
1372 		return -ENOENT;
1373 
1374 	inode_lock_nested(parent, I_MUTEX_PARENT);
1375 	if (!S_ISDIR(parent->i_mode))
1376 		goto unlock;
1377 
1378 	err = -ENOENT;
1379 	dir = d_find_alias(parent);
1380 	if (!dir)
1381 		goto unlock;
1382 
1383 	name->hash = full_name_hash(dir, name->name, name->len);
1384 	entry = d_lookup(dir, name);
1385 	dput(dir);
1386 	if (!entry)
1387 		goto unlock;
1388 
1389 	fuse_dir_changed(parent);
1390 	if (!(flags & FUSE_EXPIRE_ONLY))
1391 		d_invalidate(entry);
1392 	fuse_invalidate_entry_cache(entry);
1393 
1394 	if (child_nodeid != 0 && d_really_is_positive(entry)) {
1395 		inode_lock(d_inode(entry));
1396 		if (get_node_id(d_inode(entry)) != child_nodeid) {
1397 			err = -ENOENT;
1398 			goto badentry;
1399 		}
1400 		if (d_mountpoint(entry)) {
1401 			err = -EBUSY;
1402 			goto badentry;
1403 		}
1404 		if (d_is_dir(entry)) {
1405 			shrink_dcache_parent(entry);
1406 			if (!simple_empty(entry)) {
1407 				err = -ENOTEMPTY;
1408 				goto badentry;
1409 			}
1410 			d_inode(entry)->i_flags |= S_DEAD;
1411 		}
1412 		dont_mount(entry);
1413 		clear_nlink(d_inode(entry));
1414 		err = 0;
1415  badentry:
1416 		inode_unlock(d_inode(entry));
1417 		if (!err)
1418 			d_delete(entry);
1419 	} else {
1420 		err = 0;
1421 	}
1422 	dput(entry);
1423 
1424  unlock:
1425 	inode_unlock(parent);
1426 	iput(parent);
1427 	return err;
1428 }
1429 
1430 static inline bool fuse_permissible_uidgid(struct fuse_conn *fc)
1431 {
1432 	const struct cred *cred = current_cred();
1433 
1434 	return (uid_eq(cred->euid, fc->user_id) &&
1435 		uid_eq(cred->suid, fc->user_id) &&
1436 		uid_eq(cred->uid,  fc->user_id) &&
1437 		gid_eq(cred->egid, fc->group_id) &&
1438 		gid_eq(cred->sgid, fc->group_id) &&
1439 		gid_eq(cred->gid,  fc->group_id));
1440 }
1441 
1442 /*
1443  * Calling into a user-controlled filesystem gives the filesystem
1444  * daemon ptrace-like capabilities over the current process.  This
1445  * means that the filesystem daemon is able to record the exact
1446  * filesystem operations performed, and can also control the behavior
1447  * of the requester process in otherwise impossible ways.  For example,
1448  * it can delay the operation for an arbitrary length of time, allowing
1449  * a DoS against the requester.
1450  *
1451  * For this reason, only processes for which the owner of the mount has
1452  * ptrace privilege may call into the filesystem.  This excludes
1453  * processes started by other users, as well as suid or sgid processes.
1454  */
1455 bool fuse_allow_current_process(struct fuse_conn *fc)
1456 {
1457 	bool allow;
1458 
1459 	if (fc->allow_other)
1460 		allow = current_in_userns(fc->user_ns);
1461 	else
1462 		allow = fuse_permissible_uidgid(fc);
1463 
1464 	if (!allow && allow_sys_admin_access && capable(CAP_SYS_ADMIN))
1465 		allow = true;
1466 
1467 	return allow;
1468 }
1469 
1470 static int fuse_access(struct inode *inode, int mask)
1471 {
1472 	struct fuse_mount *fm = get_fuse_mount(inode);
1473 	FUSE_ARGS(args);
1474 	struct fuse_access_in inarg;
1475 	int err;
1476 
1477 	BUG_ON(mask & MAY_NOT_BLOCK);
1478 
1479 	/*
1480 	 * We should not send FUSE_ACCESS to userspace when idmapped
1481 	 * mounts are enabled, because in that case
1482 	 * fc->default_permissions is set and access permission checks
1483 	 * are done on the kernel side.
1484 	 */
1485 	WARN_ON_ONCE(!(fm->sb->s_iflags & SB_I_NOIDMAP));
1486 
1487 	if (fm->fc->no_access)
1488 		return 0;
1489 
1490 	memset(&inarg, 0, sizeof(inarg));
1491 	inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
1492 	args.opcode = FUSE_ACCESS;
1493 	args.nodeid = get_node_id(inode);
1494 	args.in_numargs = 1;
1495 	args.in_args[0].size = sizeof(inarg);
1496 	args.in_args[0].value = &inarg;
1497 	err = fuse_simple_request(fm, &args);
1498 	if (err == -ENOSYS) {
1499 		fm->fc->no_access = 1;
1500 		err = 0;
1501 	}
1502 	return err;
1503 }
1504 
1505 static int fuse_perm_getattr(struct inode *inode, int mask)
1506 {
1507 	if (mask & MAY_NOT_BLOCK)
1508 		return -ECHILD;
1509 
1510 	forget_all_cached_acls(inode);
1511 	return fuse_do_getattr(&nop_mnt_idmap, inode, NULL, NULL);
1512 }
1513 
1514 /*
1515  * Check permission.  The two basic access models of FUSE are:
1516  *
1517  * 1) Local access checking ('default_permissions' mount option) based
1518  * on file mode.  This is the plain old disk filesystem permission
1519  * model.
1520  *
1521  * 2) "Remote" access checking, where the server is responsible for
1522  * checking permission in each inode operation.  An exception to this
1523  * is if ->permission() was invoked from sys_access() in which case an
1524  * access request is sent.  Execute permission is still checked
1525  * locally based on file mode.
1526  */
1527 static int fuse_permission(struct mnt_idmap *idmap,
1528 			   struct inode *inode, int mask)
1529 {
1530 	struct fuse_conn *fc = get_fuse_conn(inode);
1531 	bool refreshed = false;
1532 	int err = 0;
1533 
1534 	if (fuse_is_bad(inode))
1535 		return -EIO;
1536 
1537 	if (!fuse_allow_current_process(fc))
1538 		return -EACCES;
1539 
1540 	/*
1541 	 * If attributes are needed, refresh them before proceeding
1542 	 */
1543 	if (fc->default_permissions ||
1544 	    ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1545 		struct fuse_inode *fi = get_fuse_inode(inode);
1546 		u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID;
1547 
1548 		if (perm_mask & READ_ONCE(fi->inval_mask) ||
1549 		    time_before64(fi->i_time, get_jiffies_64())) {
1550 			refreshed = true;
1551 
1552 			err = fuse_perm_getattr(inode, mask);
1553 			if (err)
1554 				return err;
1555 		}
1556 	}
1557 
1558 	if (fc->default_permissions) {
1559 		err = generic_permission(idmap, inode, mask);
1560 
1561 		/* If permission is denied, try to refresh file
1562 		   attributes.  This is also needed because the root
1563 		   node will at first have no permissions */
1564 		if (err == -EACCES && !refreshed) {
1565 			err = fuse_perm_getattr(inode, mask);
1566 			if (!err)
1567 				err = generic_permission(idmap,
1568 							 inode, mask);
1569 		}
1570 
1571 		/* Note: the opposite of the above test does not
1572 		   exist.  So if permissions are revoked this won't be
1573 		   noticed immediately, only after the attribute
1574 		   timeout has expired */
1575 	} else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
1576 		err = fuse_access(inode, mask);
1577 	} else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
1578 		if (!(inode->i_mode & S_IXUGO)) {
1579 			if (refreshed)
1580 				return -EACCES;
1581 
1582 			err = fuse_perm_getattr(inode, mask);
1583 			if (!err && !(inode->i_mode & S_IXUGO))
1584 				return -EACCES;
1585 		}
1586 	}
1587 	return err;
1588 }
1589 
1590 static int fuse_readlink_page(struct inode *inode, struct folio *folio)
1591 {
1592 	struct fuse_mount *fm = get_fuse_mount(inode);
1593 	struct fuse_folio_desc desc = { .length = PAGE_SIZE - 1 };
1594 	struct fuse_args_pages ap = {
1595 		.num_folios = 1,
1596 		.folios = &folio,
1597 		.descs = &desc,
1598 	};
1599 	char *link;
1600 	ssize_t res;
1601 
1602 	ap.args.opcode = FUSE_READLINK;
1603 	ap.args.nodeid = get_node_id(inode);
1604 	ap.args.out_pages = true;
1605 	ap.args.out_argvar = true;
1606 	ap.args.page_zeroing = true;
1607 	ap.args.out_numargs = 1;
1608 	ap.args.out_args[0].size = desc.length;
1609 	res = fuse_simple_request(fm, &ap.args);
1610 
1611 	fuse_invalidate_atime(inode);
1612 
1613 	if (res < 0)
1614 		return res;
1615 
1616 	if (WARN_ON(res >= PAGE_SIZE))
1617 		return -EIO;
1618 
1619 	link = folio_address(folio);
1620 	link[res] = '\0';
1621 
1622 	return 0;
1623 }
1624 
1625 static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
1626 				 struct delayed_call *callback)
1627 {
1628 	struct fuse_conn *fc = get_fuse_conn(inode);
1629 	struct folio *folio;
1630 	int err;
1631 
1632 	err = -EIO;
1633 	if (fuse_is_bad(inode))
1634 		goto out_err;
1635 
1636 	if (fc->cache_symlinks)
1637 		return page_get_link(dentry, inode, callback);
1638 
1639 	err = -ECHILD;
1640 	if (!dentry)
1641 		goto out_err;
1642 
1643 	folio = folio_alloc(GFP_KERNEL, 0);
1644 	err = -ENOMEM;
1645 	if (!folio)
1646 		goto out_err;
1647 
1648 	err = fuse_readlink_page(inode, folio);
1649 	if (err) {
1650 		folio_put(folio);
1651 		goto out_err;
1652 	}
1653 
1654 	set_delayed_call(callback, page_put_link, &folio->page);
1655 
1656 	return folio_address(folio);
1657 
1658 out_err:
1659 	return ERR_PTR(err);
1660 }
1661 
1662 static int fuse_dir_open(struct inode *inode, struct file *file)
1663 {
1664 	struct fuse_mount *fm = get_fuse_mount(inode);
1665 	int err;
1666 
1667 	if (fuse_is_bad(inode))
1668 		return -EIO;
1669 
1670 	err = generic_file_open(inode, file);
1671 	if (err)
1672 		return err;
1673 
1674 	err = fuse_do_open(fm, get_node_id(inode), file, true);
1675 	if (!err) {
1676 		struct fuse_file *ff = file->private_data;
1677 
1678 		/*
1679 		 * Keep handling FOPEN_STREAM and FOPEN_NONSEEKABLE for
1680 		 * directories for backward compatibility, though it's unlikely
1681 		 * to be useful.
1682 		 */
1683 		if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE))
1684 			nonseekable_open(inode, file);
1685 		if (!(ff->open_flags & FOPEN_KEEP_CACHE))
1686 			invalidate_inode_pages2(inode->i_mapping);
1687 	}
1688 
1689 	return err;
1690 }
1691 
1692 static int fuse_dir_release(struct inode *inode, struct file *file)
1693 {
1694 	fuse_release_common(file, true);
1695 
1696 	return 0;
1697 }
1698 
1699 static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
1700 			  int datasync)
1701 {
1702 	struct inode *inode = file->f_mapping->host;
1703 	struct fuse_conn *fc = get_fuse_conn(inode);
1704 	int err;
1705 
1706 	if (fuse_is_bad(inode))
1707 		return -EIO;
1708 
1709 	if (fc->no_fsyncdir)
1710 		return 0;
1711 
1712 	inode_lock(inode);
1713 	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR);
1714 	if (err == -ENOSYS) {
1715 		fc->no_fsyncdir = 1;
1716 		err = 0;
1717 	}
1718 	inode_unlock(inode);
1719 
1720 	return err;
1721 }
1722 
1723 static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
1724 			    unsigned long arg)
1725 {
1726 	struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
1727 
1728 	/* FUSE_IOCTL_DIR only supported for API version >= 7.18 */
1729 	if (fc->minor < 18)
1730 		return -ENOTTY;
1731 
1732 	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_DIR);
1733 }
1734 
1735 static long fuse_dir_compat_ioctl(struct file *file, unsigned int cmd,
1736 				   unsigned long arg)
1737 {
1738 	struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
1739 
1740 	if (fc->minor < 18)
1741 		return -ENOTTY;
1742 
1743 	return fuse_ioctl_common(file, cmd, arg,
1744 				 FUSE_IOCTL_COMPAT | FUSE_IOCTL_DIR);
1745 }
1746 
1747 static bool update_mtime(unsigned ivalid, bool trust_local_mtime)
1748 {
1749 	/* Always update if mtime is explicitly set */
1750 	if (ivalid & ATTR_MTIME_SET)
1751 		return true;
1752 
1753 	/* Or if kernel i_mtime is the official one */
1754 	if (trust_local_mtime)
1755 		return true;
1756 
1757 	/* If it's an open(O_TRUNC) or an ftruncate(), don't update */
1758 	if ((ivalid & ATTR_SIZE) && (ivalid & (ATTR_OPEN | ATTR_FILE)))
1759 		return false;
1760 
1761 	/* In all other cases update */
1762 	return true;
1763 }
1764 
1765 static void iattr_to_fattr(struct mnt_idmap *idmap, struct fuse_conn *fc,
1766 			   struct iattr *iattr, struct fuse_setattr_in *arg,
1767 			   bool trust_local_cmtime)
1768 {
1769 	unsigned ivalid = iattr->ia_valid;
1770 
1771 	if (ivalid & ATTR_MODE)
1772 		arg->valid |= FATTR_MODE,   arg->mode = iattr->ia_mode;
1773 
1774 	if (ivalid & ATTR_UID) {
1775 		kuid_t fsuid = from_vfsuid(idmap, fc->user_ns, iattr->ia_vfsuid);
1776 
1777 		arg->valid |= FATTR_UID;
1778 		arg->uid = from_kuid(fc->user_ns, fsuid);
1779 	}
1780 
1781 	if (ivalid & ATTR_GID) {
1782 		kgid_t fsgid = from_vfsgid(idmap, fc->user_ns, iattr->ia_vfsgid);
1783 
1784 		arg->valid |= FATTR_GID;
1785 		arg->gid = from_kgid(fc->user_ns, fsgid);
1786 	}
1787 
1788 	if (ivalid & ATTR_SIZE)
1789 		arg->valid |= FATTR_SIZE,   arg->size = iattr->ia_size;
1790 	if (ivalid & ATTR_ATIME) {
1791 		arg->valid |= FATTR_ATIME;
1792 		arg->atime = iattr->ia_atime.tv_sec;
1793 		arg->atimensec = iattr->ia_atime.tv_nsec;
1794 		if (!(ivalid & ATTR_ATIME_SET))
1795 			arg->valid |= FATTR_ATIME_NOW;
1796 	}
1797 	if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_cmtime)) {
1798 		arg->valid |= FATTR_MTIME;
1799 		arg->mtime = iattr->ia_mtime.tv_sec;
1800 		arg->mtimensec = iattr->ia_mtime.tv_nsec;
1801 		if (!(ivalid & ATTR_MTIME_SET) && !trust_local_cmtime)
1802 			arg->valid |= FATTR_MTIME_NOW;
1803 	}
1804 	if ((ivalid & ATTR_CTIME) && trust_local_cmtime) {
1805 		arg->valid |= FATTR_CTIME;
1806 		arg->ctime = iattr->ia_ctime.tv_sec;
1807 		arg->ctimensec = iattr->ia_ctime.tv_nsec;
1808 	}
1809 }
1810 
1811 /*
1812  * Prevent concurrent writepages on inode
1813  *
1814  * This is done by adding a negative bias to the inode write counter
1815  * and waiting for all pending writes to finish.
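 *
 * FUSE_NOWRITE is a large negative bias, so the counter only returns to
 * exactly FUSE_NOWRITE once every outstanding write has completed.
 * fuse_release_nowrite() removes the bias again and flushes any
 * writepages that were queued in the meantime.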
1816  */
1817 void fuse_set_nowrite(struct inode *inode)
1818 {
1819 	struct fuse_inode *fi = get_fuse_inode(inode);
1820 
1821 	BUG_ON(!inode_is_locked(inode));
1822 
1823 	spin_lock(&fi->lock);
1824 	BUG_ON(fi->writectr < 0);
1825 	fi->writectr += FUSE_NOWRITE;
1826 	spin_unlock(&fi->lock);
1827 	wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
1828 }
1829 
1830 /*
1831  * Allow writepages on inode
1832  *
1833  * Remove the bias from the writecounter and send any queued
1834  * writepages.
1835  */
1836 static void __fuse_release_nowrite(struct inode *inode)
1837 {
1838 	struct fuse_inode *fi = get_fuse_inode(inode);
1839 
1840 	BUG_ON(fi->writectr != FUSE_NOWRITE);
1841 	fi->writectr = 0;
1842 	fuse_flush_writepages(inode);
1843 }
1844 
1845 void fuse_release_nowrite(struct inode *inode)
1846 {
1847 	struct fuse_inode *fi = get_fuse_inode(inode);
1848 
1849 	spin_lock(&fi->lock);
1850 	__fuse_release_nowrite(inode);
1851 	spin_unlock(&fi->lock);
1852 }
1853 
1854 static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
1855 			      struct inode *inode,
1856 			      struct fuse_setattr_in *inarg_p,
1857 			      struct fuse_attr_out *outarg_p)
1858 {
1859 	args->opcode = FUSE_SETATTR;
1860 	args->nodeid = get_node_id(inode);
1861 	args->in_numargs = 1;
1862 	args->in_args[0].size = sizeof(*inarg_p);
1863 	args->in_args[0].value = inarg_p;
1864 	args->out_numargs = 1;
1865 	args->out_args[0].size = sizeof(*outarg_p);
1866 	args->out_args[0].value = outarg_p;
1867 }
1868 
1869 /*
1870  * Flush inode->i_mtime to the server
1871  */
1872 int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
1873 {
1874 	struct fuse_mount *fm = get_fuse_mount(inode);
1875 	FUSE_ARGS(args);
1876 	struct fuse_setattr_in inarg;
1877 	struct fuse_attr_out outarg;
1878 
1879 	memset(&inarg, 0, sizeof(inarg));
1880 	memset(&outarg, 0, sizeof(outarg));
1881 
1882 	inarg.valid = FATTR_MTIME;
1883 	inarg.mtime = inode_get_mtime_sec(inode);
1884 	inarg.mtimensec = inode_get_mtime_nsec(inode);
1885 	if (fm->fc->minor >= 23) {
1886 		inarg.valid |= FATTR_CTIME;
1887 		inarg.ctime = inode_get_ctime_sec(inode);
1888 		inarg.ctimensec = inode_get_ctime_nsec(inode);
1889 	}
1890 	if (ff) {
1891 		inarg.valid |= FATTR_FH;
1892 		inarg.fh = ff->fh;
1893 	}
1894 	fuse_setattr_fill(fm->fc, &args, inode, &inarg, &outarg);
1895 
1896 	return fuse_simple_request(fm, &args);
1897 }
1898 
1899 /*
1900  * Set attributes, and at the same time refresh them.
1901  *
1902  * Truncation is slightly complicated, because the 'truncate' request
1903  * may fail, in which case we don't want to touch the mapping.
1904  * vmtruncate() doesn't allow for this case, so do the rlimit checking
1905  * and the actual truncation by hand.
1906  */
1907 int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
1908 		    struct iattr *attr, struct file *file)
1909 {
1910 	struct inode *inode = d_inode(dentry);
1911 	struct fuse_mount *fm = get_fuse_mount(inode);
1912 	struct fuse_conn *fc = fm->fc;
1913 	struct fuse_inode *fi = get_fuse_inode(inode);
1914 	struct address_space *mapping = inode->i_mapping;
1915 	FUSE_ARGS(args);
1916 	struct fuse_setattr_in inarg;
1917 	struct fuse_attr_out outarg;
1918 	bool is_truncate = false;
1919 	bool is_wb = fc->writeback_cache && S_ISREG(inode->i_mode);
1920 	loff_t oldsize;
1921 	int err;
1922 	bool trust_local_cmtime = is_wb;
1923 	bool fault_blocked = false;
1924 
1925 	if (!fc->default_permissions)
1926 		attr->ia_valid |= ATTR_FORCE;
1927 
1928 	err = setattr_prepare(idmap, dentry, attr);
1929 	if (err)
1930 		return err;
1931 
1932 	if (attr->ia_valid & ATTR_SIZE) {
1933 		if (WARN_ON(!S_ISREG(inode->i_mode)))
1934 			return -EIO;
1935 		is_truncate = true;
1936 	}
1937 
1938 	if (FUSE_IS_DAX(inode) && is_truncate) {
1939 		filemap_invalidate_lock(mapping);
1940 		fault_blocked = true;
1941 		err = fuse_dax_break_layouts(inode, 0, 0);
1942 		if (err) {
1943 			filemap_invalidate_unlock(mapping);
1944 			return err;
1945 		}
1946 	}
1947 
1948 	if (attr->ia_valid & ATTR_OPEN) {
1949 		/* This is coming from open(..., ... | O_TRUNC); */
1950 		WARN_ON(!(attr->ia_valid & ATTR_SIZE));
1951 		WARN_ON(attr->ia_size != 0);
1952 		if (fc->atomic_o_trunc) {
1953 			/*
1954 			 * No need to send request to userspace, since actual
1955 			 * truncation has already been done by OPEN.  But still
1956 			 * need to truncate page cache.
1957 			 */
1958 			i_size_write(inode, 0);
1959 			truncate_pagecache(inode, 0);
1960 			goto out;
1961 		}
1962 		file = NULL;
1963 	}
1964 
1965 	/* Flush dirty data/metadata before non-truncate SETATTR */
1966 	if (is_wb &&
1967 	    attr->ia_valid &
1968 			(ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
1969 			 ATTR_TIMES_SET)) {
1970 		err = write_inode_now(inode, true);
1971 		if (err)
1972 			return err;
1973 
1974 		fuse_set_nowrite(inode);
1975 		fuse_release_nowrite(inode);
1976 	}
1977 
1978 	if (is_truncate) {
1979 		fuse_set_nowrite(inode);
1980 		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1981 		if (trust_local_cmtime && attr->ia_size != inode->i_size)
1982 			attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1983 	}
1984 
1985 	memset(&inarg, 0, sizeof(inarg));
1986 	memset(&outarg, 0, sizeof(outarg));
1987 	iattr_to_fattr(idmap, fc, attr, &inarg, trust_local_cmtime);
1988 	if (file) {
1989 		struct fuse_file *ff = file->private_data;
1990 		inarg.valid |= FATTR_FH;
1991 		inarg.fh = ff->fh;
1992 	}
1993 
1994 	/* Kill suid/sgid for non-directory chown unconditionally */
1995 	if (fc->handle_killpriv_v2 && !S_ISDIR(inode->i_mode) &&
1996 	    attr->ia_valid & (ATTR_UID | ATTR_GID))
1997 		inarg.valid |= FATTR_KILL_SUIDGID;
1998 
1999 	if (attr->ia_valid & ATTR_SIZE) {
2000 		/* For mandatory locking in truncate */
2001 		inarg.valid |= FATTR_LOCKOWNER;
2002 		inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
2003 
2004 		/* Kill suid/sgid for truncate only if no CAP_FSETID */
2005 		if (fc->handle_killpriv_v2 && !capable(CAP_FSETID))
2006 			inarg.valid |= FATTR_KILL_SUIDGID;
2007 	}
2008 	fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
2009 	err = fuse_simple_request(fm, &args);
2010 	if (err) {
2011 		if (err == -EINTR)
2012 			fuse_invalidate_attr(inode);
2013 		goto error;
2014 	}
2015 
2016 	if (fuse_invalid_attr(&outarg.attr) ||
2017 	    inode_wrong_type(inode, outarg.attr.mode)) {
2018 		fuse_make_bad(inode);
2019 		err = -EIO;
2020 		goto error;
2021 	}
2022 
2023 	spin_lock(&fi->lock);
2024 	/* the kernel maintains i_mtime and i_ctime locally */
2025 	if (trust_local_cmtime) {
2026 		if (attr->ia_valid & ATTR_MTIME)
2027 			inode_set_mtime_to_ts(inode, attr->ia_mtime);
2028 		if (attr->ia_valid & ATTR_CTIME)
2029 			inode_set_ctime_to_ts(inode, attr->ia_ctime);
2030 		/* FIXME: clear I_DIRTY_SYNC? */
2031 	}
2032 
2033 	fuse_change_attributes_common(inode, &outarg.attr, NULL,
2034 				      ATTR_TIMEOUT(&outarg),
2035 				      fuse_get_cache_mask(inode), 0);
2036 	oldsize = inode->i_size;
2037 	/* see the comment in fuse_change_attributes() */
2038 	if (!is_wb || is_truncate)
2039 		i_size_write(inode, outarg.attr.size);
2040 
2041 	if (is_truncate) {
2042 		/* NOTE: this may release/reacquire fi->lock */
2043 		__fuse_release_nowrite(inode);
2044 	}
2045 	spin_unlock(&fi->lock);
2046 
2047 	/*
2048 	 * Only call invalidate_inode_pages2() after removing
2049 	 * FUSE_NOWRITE, otherwise fuse_launder_folio() would deadlock.
2050 	 */
2051 	if ((is_truncate || !is_wb) &&
2052 	    S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
2053 		truncate_pagecache(inode, outarg.attr.size);
2054 		invalidate_inode_pages2(mapping);
2055 	}
2056 
2057 	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2058 out:
2059 	if (fault_blocked)
2060 		filemap_invalidate_unlock(mapping);
2061 
2062 	return 0;
2063 
2064 error:
2065 	if (is_truncate)
2066 		fuse_release_nowrite(inode);
2067 
2068 	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2069 
2070 	if (fault_blocked)
2071 		filemap_invalidate_unlock(mapping);
2072 	return err;
2073 }
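
The ATTR_OPEN branch above corresponds to an application opening with O_TRUNC. A small sketch of the triggering call, assuming a hypothetical mount at /mnt/fuse:

#include <fcntl.h>
#include <unistd.h>

int truncate_on_open(void)
{
	/* With atomic_o_trunc negotiated, the server truncates during
	 * FUSE_OPEN and the kernel only drops its own page cache;
	 * otherwise a separate FUSE_SETATTR with size == 0 follows. */
	int fd = open("/mnt/fuse/file", O_WRONLY | O_TRUNC);

	if (fd < 0)
		return -1;
	return close(fd);
}
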
2074 
2075 static int fuse_setattr(struct mnt_idmap *idmap, struct dentry *entry,
2076 			struct iattr *attr)
2077 {
2078 	struct inode *inode = d_inode(entry);
2079 	struct fuse_conn *fc = get_fuse_conn(inode);
2080 	struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
2081 	int ret;
2082 
2083 	if (fuse_is_bad(inode))
2084 		return -EIO;
2085 
2086 	if (!fuse_allow_current_process(get_fuse_conn(inode)))
2087 		return -EACCES;
2088 
2089 	if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) {
2090 		attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID |
2091 				    ATTR_MODE);
2092 
2093 		/*
2094 		 * The only sane way to reliably kill suid/sgid is to do it in
2095 		 * the userspace filesystem.
2096 		 *
2097 		 * This should be done on write(), truncate() and chown().
2098 		 */
2099 		if (!fc->handle_killpriv && !fc->handle_killpriv_v2) {
2100 			/*
2101 			 * ia_mode calculation may have used stale i_mode.
2102 			 * Refresh and recalculate.
2103 			 */
2104 			ret = fuse_do_getattr(idmap, inode, NULL, file);
2105 			if (ret)
2106 				return ret;
2107 
2108 			attr->ia_mode = inode->i_mode;
2109 			if (inode->i_mode & S_ISUID) {
2110 				attr->ia_valid |= ATTR_MODE;
2111 				attr->ia_mode &= ~S_ISUID;
2112 			}
2113 			if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
2114 				attr->ia_valid |= ATTR_MODE;
2115 				attr->ia_mode &= ~S_ISGID;
2116 			}
2117 		}
2118 	}
2119 	if (!attr->ia_valid)
2120 		return 0;
2121 
2122 	ret = fuse_do_setattr(idmap, entry, attr, file);
2123 	if (!ret) {
2124 		/*
2125 		 * If filesystem supports acls it may have updated acl xattrs in
2126 		 * the filesystem, so forget cached acls for the inode.
2127 		 */
2128 		if (fc->posix_acl)
2129 			forget_all_cached_acls(inode);
2130 
2131 		/* Directory mode changed, may need to revalidate access */
2132 		if (d_is_dir(entry) && (attr->ia_valid & ATTR_MODE))
2133 			fuse_invalidate_entry_cache(entry);
2134 	}
2135 	return ret;
2136 }
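
The suid/sgid fallback above follows the usual stripping rule. A standalone sketch of just that mode calculation (the helper name is invented for illustration):

#include <sys/stat.h>

static mode_t strip_priv_bits(mode_t mode)
{
	/* Always drop setuid; drop setgid only when group-execute is also
	 * set, since a setgid bit without it denotes mandatory locking. */
	if (mode & S_ISUID)
		mode &= ~S_ISUID;
	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
		mode &= ~S_ISGID;
	return mode;
}
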
2137 
2138 static int fuse_getattr(struct mnt_idmap *idmap,
2139 			const struct path *path, struct kstat *stat,
2140 			u32 request_mask, unsigned int flags)
2141 {
2142 	struct inode *inode = d_inode(path->dentry);
2143 	struct fuse_conn *fc = get_fuse_conn(inode);
2144 
2145 	if (fuse_is_bad(inode))
2146 		return -EIO;
2147 
2148 	if (!fuse_allow_current_process(fc)) {
2149 		if (!request_mask) {
2150 			/*
2151 			 * If user explicitly requested *nothing* then don't
2152 			 * error out, but return st_dev only.
2153 			 */
2154 			stat->result_mask = 0;
2155 			stat->dev = inode->i_sb->s_dev;
2156 			return 0;
2157 		}
2158 		return -EACCES;
2159 	}
2160 
2161 	return fuse_update_get_attr(idmap, inode, NULL, stat, request_mask, flags);
2162 }
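
The request_mask == 0 escape hatch above can be exercised from userspace with statx() and an empty mask: a process rejected by the allow_other policy still gets the device number back instead of -EACCES. A sketch, with a hypothetical mount point:

#define _GNU_SOURCE
#include <fcntl.h>	/* AT_FDCWD */
#include <sys/stat.h>	/* statx(), struct statx (glibc >= 2.28) */
#include <stdio.h>

int main(void)
{
	struct statx stx;

	/* mask == 0: fuse_getattr() fills only stat->dev for callers that
	 * fail fuse_allow_current_process(). */
	if (statx(AT_FDCWD, "/mnt/fuse", 0, 0, &stx) == 0)
		printf("dev %u:%u, result mask 0x%x\n",
		       stx.stx_dev_major, stx.stx_dev_minor, stx.stx_mask);
	return 0;
}
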
2163 
2164 static const struct inode_operations fuse_dir_inode_operations = {
2165 	.lookup		= fuse_lookup,
2166 	.mkdir		= fuse_mkdir,
2167 	.symlink	= fuse_symlink,
2168 	.unlink		= fuse_unlink,
2169 	.rmdir		= fuse_rmdir,
2170 	.rename		= fuse_rename2,
2171 	.link		= fuse_link,
2172 	.setattr	= fuse_setattr,
2173 	.create		= fuse_create,
2174 	.atomic_open	= fuse_atomic_open,
2175 	.tmpfile	= fuse_tmpfile,
2176 	.mknod		= fuse_mknod,
2177 	.permission	= fuse_permission,
2178 	.getattr	= fuse_getattr,
2179 	.listxattr	= fuse_listxattr,
2180 	.get_inode_acl	= fuse_get_inode_acl,
2181 	.get_acl	= fuse_get_acl,
2182 	.set_acl	= fuse_set_acl,
2183 	.fileattr_get	= fuse_fileattr_get,
2184 	.fileattr_set	= fuse_fileattr_set,
2185 };
2186 
2187 static const struct file_operations fuse_dir_operations = {
2188 	.llseek		= generic_file_llseek,
2189 	.read		= generic_read_dir,
2190 	.iterate_shared	= fuse_readdir,
2191 	.open		= fuse_dir_open,
2192 	.release	= fuse_dir_release,
2193 	.fsync		= fuse_dir_fsync,
2194 	.unlocked_ioctl	= fuse_dir_ioctl,
2195 	.compat_ioctl	= fuse_dir_compat_ioctl,
2196 };
2197 
2198 static const struct inode_operations fuse_common_inode_operations = {
2199 	.setattr	= fuse_setattr,
2200 	.permission	= fuse_permission,
2201 	.getattr	= fuse_getattr,
2202 	.listxattr	= fuse_listxattr,
2203 	.get_inode_acl	= fuse_get_inode_acl,
2204 	.get_acl	= fuse_get_acl,
2205 	.set_acl	= fuse_set_acl,
2206 	.fileattr_get	= fuse_fileattr_get,
2207 	.fileattr_set	= fuse_fileattr_set,
2208 };
2209 
2210 static const struct inode_operations fuse_symlink_inode_operations = {
2211 	.setattr	= fuse_setattr,
2212 	.get_link	= fuse_get_link,
2213 	.getattr	= fuse_getattr,
2214 	.listxattr	= fuse_listxattr,
2215 };
2216 
2217 void fuse_init_common(struct inode *inode)
2218 {
2219 	inode->i_op = &fuse_common_inode_operations;
2220 }
2221 
2222 void fuse_init_dir(struct inode *inode)
2223 {
2224 	struct fuse_inode *fi = get_fuse_inode(inode);
2225 
2226 	inode->i_op = &fuse_dir_inode_operations;
2227 	inode->i_fop = &fuse_dir_operations;
2228 
2229 	spin_lock_init(&fi->rdc.lock);
2230 	fi->rdc.cached = false;
2231 	fi->rdc.size = 0;
2232 	fi->rdc.pos = 0;
2233 	fi->rdc.version = 0;
2234 }
2235 
2236 static int fuse_symlink_read_folio(struct file *null, struct folio *folio)
2237 {
2238 	int err = fuse_readlink_page(folio->mapping->host, folio);
2239 
2240 	if (!err)
2241 		folio_mark_uptodate(folio);
2242 
2243 	folio_unlock(folio);
2244 
2245 	return err;
2246 }
2247 
2248 static const struct address_space_operations fuse_symlink_aops = {
2249 	.read_folio	= fuse_symlink_read_folio,
2250 };
2251 
2252 void fuse_init_symlink(struct inode *inode)
2253 {
2254 	inode->i_op = &fuse_symlink_inode_operations;
2255 	inode->i_data.a_ops = &fuse_symlink_aops;
2256 	inode_nohighmem(inode);
2257 }
2258