xref: /linux/fs/fuse/dir.c (revision 492c826b9facefa84995f4dea917e301b5ee0884)
1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
4 
5   This program can be distributed under the terms of the GNU GPL.
6   See the file COPYING.
7 */
8 
9 #include "fuse_i.h"
10 
11 #include <linux/pagemap.h>
12 #include <linux/file.h>
13 #include <linux/sched.h>
14 #include <linux/namei.h>
15 #include <linux/slab.h>
16 
17 #if BITS_PER_LONG >= 64
18 static inline void fuse_dentry_settime(struct dentry *entry, u64 time)
19 {
20 	entry->d_time = time;
21 }
22 
23 static inline u64 fuse_dentry_time(struct dentry *entry)
24 {
25 	return entry->d_time;
26 }
27 #else
28 /*
29  * On 32 bit archs store the high 32 bits of time in d_fsdata
30  */
31 static void fuse_dentry_settime(struct dentry *entry, u64 time)
32 {
33 	entry->d_time = time;
34 	entry->d_fsdata = (void *) (unsigned long) (time >> 32);
35 }
36 
37 static u64 fuse_dentry_time(struct dentry *entry)
38 {
39 	return (u64) entry->d_time +
40 		((u64) (unsigned long) entry->d_fsdata << 32);
41 }
42 #endif
43 
44 /*
45  * FUSE caches dentries and attributes with separate timeout.  The
46  * time in jiffies until the dentry/attributes are valid is stored in
47  * dentry->d_time and fuse_inode->i_time respectively.
48  */
49 
50 /*
51  * Calculate the time in jiffies until a dentry/attributes are valid
52  */
53 static u64 time_to_jiffies(unsigned long sec, unsigned long nsec)
54 {
55 	if (sec || nsec) {
56 		struct timespec ts = {sec, nsec};
57 		return get_jiffies_64() + timespec_to_jiffies(&ts);
58 	} else
59 		return 0;
60 }
61 
62 /*
63  * Set dentry and possibly attribute timeouts from the lookup/mk*
64  * replies
65  */
66 static void fuse_change_entry_timeout(struct dentry *entry,
67 				      struct fuse_entry_out *o)
68 {
69 	fuse_dentry_settime(entry,
70 		time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
71 }
72 
73 static u64 attr_timeout(struct fuse_attr_out *o)
74 {
75 	return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
76 }
77 
78 static u64 entry_attr_timeout(struct fuse_entry_out *o)
79 {
80 	return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
81 }
82 
83 /*
84  * Mark the attributes as stale, so that at the next call to
85  * ->getattr() they will be fetched from userspace
86  */
87 void fuse_invalidate_attr(struct inode *inode)
88 {
89 	get_fuse_inode(inode)->i_time = 0;
90 }
91 
92 /*
93  * Just mark the entry as stale, so that a next attempt to look it up
94  * will result in a new lookup call to userspace
95  *
96  * This is called when a dentry is about to become negative and the
97  * timeout is unknown (unlink, rmdir, rename and in some cases
98  * lookup)
99  */
100 void fuse_invalidate_entry_cache(struct dentry *entry)
101 {
102 	fuse_dentry_settime(entry, 0);
103 }
104 
105 /*
106  * Same as fuse_invalidate_entry_cache(), but also try to remove the
107  * dentry from the hash
108  */
109 static void fuse_invalidate_entry(struct dentry *entry)
110 {
111 	d_invalidate(entry);
112 	fuse_invalidate_entry_cache(entry);
113 }
114 
115 static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_req *req,
116 			     u64 nodeid, struct qstr *name,
117 			     struct fuse_entry_out *outarg)
118 {
119 	memset(outarg, 0, sizeof(struct fuse_entry_out));
120 	req->in.h.opcode = FUSE_LOOKUP;
121 	req->in.h.nodeid = nodeid;
122 	req->in.numargs = 1;
123 	req->in.args[0].size = name->len + 1;
124 	req->in.args[0].value = name->name;
125 	req->out.numargs = 1;
126 	if (fc->minor < 9)
127 		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
128 	else
129 		req->out.args[0].size = sizeof(struct fuse_entry_out);
130 	req->out.args[0].value = outarg;
131 }
132 
133 u64 fuse_get_attr_version(struct fuse_conn *fc)
134 {
135 	u64 curr_version;
136 
137 	/*
138 	 * The spin lock isn't actually needed on 64bit archs, but we
139 	 * don't yet care too much about such optimizations.
140 	 */
141 	spin_lock(&fc->lock);
142 	curr_version = fc->attr_version;
143 	spin_unlock(&fc->lock);
144 
145 	return curr_version;
146 }
147 
148 /*
149  * Check whether the dentry is still valid
150  *
151  * If the entry validity timeout has expired and the dentry is
152  * positive, try to redo the lookup.  If the lookup results in a
153  * different inode, then let the VFS invalidate the dentry and redo
154  * the lookup once more.  If the lookup results in the same inode,
155  * then refresh the attributes, timeouts and mark the dentry valid.
156  */
157 static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
158 {
159 	struct inode *inode;
160 
161 	inode = ACCESS_ONCE(entry->d_inode);
162 	if (inode && is_bad_inode(inode))
163 		return 0;
164 	else if (fuse_dentry_time(entry) < get_jiffies_64()) {
165 		int err;
166 		struct fuse_entry_out outarg;
167 		struct fuse_conn *fc;
168 		struct fuse_req *req;
169 		struct fuse_forget_link *forget;
170 		struct dentry *parent;
171 		u64 attr_version;
172 
173 		/* For negative dentries, always do a fresh lookup */
174 		if (!inode)
175 			return 0;
176 
177 		if (nd && (nd->flags & LOOKUP_RCU))
178 			return -ECHILD;
179 
180 		fc = get_fuse_conn(inode);
181 		req = fuse_get_req(fc);
182 		if (IS_ERR(req))
183 			return 0;
184 
185 		forget = fuse_alloc_forget();
186 		if (!forget) {
187 			fuse_put_request(fc, req);
188 			return 0;
189 		}
190 
191 		attr_version = fuse_get_attr_version(fc);
192 
193 		parent = dget_parent(entry);
194 		fuse_lookup_init(fc, req, get_node_id(parent->d_inode),
195 				 &entry->d_name, &outarg);
196 		fuse_request_send(fc, req);
197 		dput(parent);
198 		err = req->out.h.error;
199 		fuse_put_request(fc, req);
200 		/* Zero nodeid is same as -ENOENT */
201 		if (!err && !outarg.nodeid)
202 			err = -ENOENT;
203 		if (!err) {
204 			struct fuse_inode *fi = get_fuse_inode(inode);
205 			if (outarg.nodeid != get_node_id(inode)) {
206 				fuse_queue_forget(fc, forget, outarg.nodeid, 1);
207 				return 0;
208 			}
209 			spin_lock(&fc->lock);
210 			fi->nlookup++;
211 			spin_unlock(&fc->lock);
212 		}
213 		kfree(forget);
214 		if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
215 			return 0;
216 
217 		fuse_change_attributes(inode, &outarg.attr,
218 				       entry_attr_timeout(&outarg),
219 				       attr_version);
220 		fuse_change_entry_timeout(entry, &outarg);
221 	}
222 	return 1;
223 }
224 
225 static int invalid_nodeid(u64 nodeid)
226 {
227 	return !nodeid || nodeid == FUSE_ROOT_ID;
228 }
229 
230 const struct dentry_operations fuse_dentry_operations = {
231 	.d_revalidate	= fuse_dentry_revalidate,
232 };
233 
234 int fuse_valid_type(int m)
235 {
236 	return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
237 		S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
238 }
239 
240 /*
241  * Add a directory inode to a dentry, ensuring that no other dentry
242  * refers to this inode.  Called with fc->inst_mutex.
243  */
244 static struct dentry *fuse_d_add_directory(struct dentry *entry,
245 					   struct inode *inode)
246 {
247 	struct dentry *alias = d_find_alias(inode);
248 	if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
249 		/* This tries to shrink the subtree below alias */
250 		fuse_invalidate_entry(alias);
251 		dput(alias);
252 		if (!list_empty(&inode->i_dentry))
253 			return ERR_PTR(-EBUSY);
254 	} else {
255 		dput(alias);
256 	}
257 	return d_splice_alias(inode, entry);
258 }
259 
260 int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
261 		     struct fuse_entry_out *outarg, struct inode **inode)
262 {
263 	struct fuse_conn *fc = get_fuse_conn_super(sb);
264 	struct fuse_req *req;
265 	struct fuse_forget_link *forget;
266 	u64 attr_version;
267 	int err;
268 
269 	*inode = NULL;
270 	err = -ENAMETOOLONG;
271 	if (name->len > FUSE_NAME_MAX)
272 		goto out;
273 
274 	req = fuse_get_req(fc);
275 	err = PTR_ERR(req);
276 	if (IS_ERR(req))
277 		goto out;
278 
279 	forget = fuse_alloc_forget();
280 	err = -ENOMEM;
281 	if (!forget) {
282 		fuse_put_request(fc, req);
283 		goto out;
284 	}
285 
286 	attr_version = fuse_get_attr_version(fc);
287 
288 	fuse_lookup_init(fc, req, nodeid, name, outarg);
289 	fuse_request_send(fc, req);
290 	err = req->out.h.error;
291 	fuse_put_request(fc, req);
292 	/* Zero nodeid is same as -ENOENT, but with valid timeout */
293 	if (err || !outarg->nodeid)
294 		goto out_put_forget;
295 
296 	err = -EIO;
297 	if (!outarg->nodeid)
298 		goto out_put_forget;
299 	if (!fuse_valid_type(outarg->attr.mode))
300 		goto out_put_forget;
301 
302 	*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
303 			   &outarg->attr, entry_attr_timeout(outarg),
304 			   attr_version);
305 	err = -ENOMEM;
306 	if (!*inode) {
307 		fuse_queue_forget(fc, forget, outarg->nodeid, 1);
308 		goto out;
309 	}
310 	err = 0;
311 
312  out_put_forget:
313 	kfree(forget);
314  out:
315 	return err;
316 }
317 
318 static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
319 				  struct nameidata *nd)
320 {
321 	int err;
322 	struct fuse_entry_out outarg;
323 	struct inode *inode;
324 	struct dentry *newent;
325 	struct fuse_conn *fc = get_fuse_conn(dir);
326 	bool outarg_valid = true;
327 
328 	err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
329 			       &outarg, &inode);
330 	if (err == -ENOENT) {
331 		outarg_valid = false;
332 		err = 0;
333 	}
334 	if (err)
335 		goto out_err;
336 
337 	err = -EIO;
338 	if (inode && get_node_id(inode) == FUSE_ROOT_ID)
339 		goto out_iput;
340 
341 	if (inode && S_ISDIR(inode->i_mode)) {
342 		mutex_lock(&fc->inst_mutex);
343 		newent = fuse_d_add_directory(entry, inode);
344 		mutex_unlock(&fc->inst_mutex);
345 		err = PTR_ERR(newent);
346 		if (IS_ERR(newent))
347 			goto out_iput;
348 	} else {
349 		newent = d_splice_alias(inode, entry);
350 	}
351 
352 	entry = newent ? newent : entry;
353 	if (outarg_valid)
354 		fuse_change_entry_timeout(entry, &outarg);
355 	else
356 		fuse_invalidate_entry_cache(entry);
357 
358 	return newent;
359 
360  out_iput:
361 	iput(inode);
362  out_err:
363 	return ERR_PTR(err);
364 }
365 
366 /*
367  * Atomic create+open operation
368  *
369  * If the filesystem doesn't support this, then fall back to separate
370  * 'mknod' + 'open' requests.
371  */
372 static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
373 			    struct nameidata *nd)
374 {
375 	int err;
376 	struct inode *inode;
377 	struct fuse_conn *fc = get_fuse_conn(dir);
378 	struct fuse_req *req;
379 	struct fuse_forget_link *forget;
380 	struct fuse_create_in inarg;
381 	struct fuse_open_out outopen;
382 	struct fuse_entry_out outentry;
383 	struct fuse_file *ff;
384 	struct file *file;
385 	int flags = nd->intent.open.flags - 1;
386 
387 	if (fc->no_create)
388 		return -ENOSYS;
389 
390 	if (flags & O_DIRECT)
391 		return -EINVAL;
392 
393 	forget = fuse_alloc_forget();
394 	if (!forget)
395 		return -ENOMEM;
396 
397 	req = fuse_get_req(fc);
398 	err = PTR_ERR(req);
399 	if (IS_ERR(req))
400 		goto out_put_forget_req;
401 
402 	err = -ENOMEM;
403 	ff = fuse_file_alloc(fc);
404 	if (!ff)
405 		goto out_put_request;
406 
407 	if (!fc->dont_mask)
408 		mode &= ~current_umask();
409 
410 	flags &= ~O_NOCTTY;
411 	memset(&inarg, 0, sizeof(inarg));
412 	memset(&outentry, 0, sizeof(outentry));
413 	inarg.flags = flags;
414 	inarg.mode = mode;
415 	inarg.umask = current_umask();
416 	req->in.h.opcode = FUSE_CREATE;
417 	req->in.h.nodeid = get_node_id(dir);
418 	req->in.numargs = 2;
419 	req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
420 						sizeof(inarg);
421 	req->in.args[0].value = &inarg;
422 	req->in.args[1].size = entry->d_name.len + 1;
423 	req->in.args[1].value = entry->d_name.name;
424 	req->out.numargs = 2;
425 	if (fc->minor < 9)
426 		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
427 	else
428 		req->out.args[0].size = sizeof(outentry);
429 	req->out.args[0].value = &outentry;
430 	req->out.args[1].size = sizeof(outopen);
431 	req->out.args[1].value = &outopen;
432 	fuse_request_send(fc, req);
433 	err = req->out.h.error;
434 	if (err) {
435 		if (err == -ENOSYS)
436 			fc->no_create = 1;
437 		goto out_free_ff;
438 	}
439 
440 	err = -EIO;
441 	if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
442 		goto out_free_ff;
443 
444 	fuse_put_request(fc, req);
445 	ff->fh = outopen.fh;
446 	ff->nodeid = outentry.nodeid;
447 	ff->open_flags = outopen.open_flags;
448 	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
449 			  &outentry.attr, entry_attr_timeout(&outentry), 0);
450 	if (!inode) {
451 		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
452 		fuse_sync_release(ff, flags);
453 		fuse_queue_forget(fc, forget, outentry.nodeid, 1);
454 		return -ENOMEM;
455 	}
456 	kfree(forget);
457 	d_instantiate(entry, inode);
458 	fuse_change_entry_timeout(entry, &outentry);
459 	fuse_invalidate_attr(dir);
460 	file = lookup_instantiate_filp(nd, entry, generic_file_open);
461 	if (IS_ERR(file)) {
462 		fuse_sync_release(ff, flags);
463 		return PTR_ERR(file);
464 	}
465 	file->private_data = fuse_file_get(ff);
466 	fuse_finish_open(inode, file);
467 	return 0;
468 
469  out_free_ff:
470 	fuse_file_free(ff);
471  out_put_request:
472 	fuse_put_request(fc, req);
473  out_put_forget_req:
474 	kfree(forget);
475 	return err;
476 }
477 
478 /*
479  * Code shared between mknod, mkdir, symlink and link
480  */
481 static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
482 			    struct inode *dir, struct dentry *entry,
483 			    int mode)
484 {
485 	struct fuse_entry_out outarg;
486 	struct inode *inode;
487 	int err;
488 	struct fuse_forget_link *forget;
489 
490 	forget = fuse_alloc_forget();
491 	if (!forget) {
492 		fuse_put_request(fc, req);
493 		return -ENOMEM;
494 	}
495 
496 	memset(&outarg, 0, sizeof(outarg));
497 	req->in.h.nodeid = get_node_id(dir);
498 	req->out.numargs = 1;
499 	if (fc->minor < 9)
500 		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
501 	else
502 		req->out.args[0].size = sizeof(outarg);
503 	req->out.args[0].value = &outarg;
504 	fuse_request_send(fc, req);
505 	err = req->out.h.error;
506 	fuse_put_request(fc, req);
507 	if (err)
508 		goto out_put_forget_req;
509 
510 	err = -EIO;
511 	if (invalid_nodeid(outarg.nodeid))
512 		goto out_put_forget_req;
513 
514 	if ((outarg.attr.mode ^ mode) & S_IFMT)
515 		goto out_put_forget_req;
516 
517 	inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
518 			  &outarg.attr, entry_attr_timeout(&outarg), 0);
519 	if (!inode) {
520 		fuse_queue_forget(fc, forget, outarg.nodeid, 1);
521 		return -ENOMEM;
522 	}
523 	kfree(forget);
524 
525 	if (S_ISDIR(inode->i_mode)) {
526 		struct dentry *alias;
527 		mutex_lock(&fc->inst_mutex);
528 		alias = d_find_alias(inode);
529 		if (alias) {
530 			/* New directory must have moved since mkdir */
531 			mutex_unlock(&fc->inst_mutex);
532 			dput(alias);
533 			iput(inode);
534 			return -EBUSY;
535 		}
536 		d_instantiate(entry, inode);
537 		mutex_unlock(&fc->inst_mutex);
538 	} else
539 		d_instantiate(entry, inode);
540 
541 	fuse_change_entry_timeout(entry, &outarg);
542 	fuse_invalidate_attr(dir);
543 	return 0;
544 
545  out_put_forget_req:
546 	kfree(forget);
547 	return err;
548 }
549 
550 static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
551 		      dev_t rdev)
552 {
553 	struct fuse_mknod_in inarg;
554 	struct fuse_conn *fc = get_fuse_conn(dir);
555 	struct fuse_req *req = fuse_get_req(fc);
556 	if (IS_ERR(req))
557 		return PTR_ERR(req);
558 
559 	if (!fc->dont_mask)
560 		mode &= ~current_umask();
561 
562 	memset(&inarg, 0, sizeof(inarg));
563 	inarg.mode = mode;
564 	inarg.rdev = new_encode_dev(rdev);
565 	inarg.umask = current_umask();
566 	req->in.h.opcode = FUSE_MKNOD;
567 	req->in.numargs = 2;
568 	req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
569 						sizeof(inarg);
570 	req->in.args[0].value = &inarg;
571 	req->in.args[1].size = entry->d_name.len + 1;
572 	req->in.args[1].value = entry->d_name.name;
573 	return create_new_entry(fc, req, dir, entry, mode);
574 }
575 
576 static int fuse_create(struct inode *dir, struct dentry *entry, int mode,
577 		       struct nameidata *nd)
578 {
579 	if (nd && (nd->flags & LOOKUP_OPEN)) {
580 		int err = fuse_create_open(dir, entry, mode, nd);
581 		if (err != -ENOSYS)
582 			return err;
583 		/* Fall back on mknod */
584 	}
585 	return fuse_mknod(dir, entry, mode, 0);
586 }
587 
588 static int fuse_mkdir(struct inode *dir, struct dentry *entry, int mode)
589 {
590 	struct fuse_mkdir_in inarg;
591 	struct fuse_conn *fc = get_fuse_conn(dir);
592 	struct fuse_req *req = fuse_get_req(fc);
593 	if (IS_ERR(req))
594 		return PTR_ERR(req);
595 
596 	if (!fc->dont_mask)
597 		mode &= ~current_umask();
598 
599 	memset(&inarg, 0, sizeof(inarg));
600 	inarg.mode = mode;
601 	inarg.umask = current_umask();
602 	req->in.h.opcode = FUSE_MKDIR;
603 	req->in.numargs = 2;
604 	req->in.args[0].size = sizeof(inarg);
605 	req->in.args[0].value = &inarg;
606 	req->in.args[1].size = entry->d_name.len + 1;
607 	req->in.args[1].value = entry->d_name.name;
608 	return create_new_entry(fc, req, dir, entry, S_IFDIR);
609 }
610 
611 static int fuse_symlink(struct inode *dir, struct dentry *entry,
612 			const char *link)
613 {
614 	struct fuse_conn *fc = get_fuse_conn(dir);
615 	unsigned len = strlen(link) + 1;
616 	struct fuse_req *req = fuse_get_req(fc);
617 	if (IS_ERR(req))
618 		return PTR_ERR(req);
619 
620 	req->in.h.opcode = FUSE_SYMLINK;
621 	req->in.numargs = 2;
622 	req->in.args[0].size = entry->d_name.len + 1;
623 	req->in.args[0].value = entry->d_name.name;
624 	req->in.args[1].size = len;
625 	req->in.args[1].value = link;
626 	return create_new_entry(fc, req, dir, entry, S_IFLNK);
627 }
628 
629 static int fuse_unlink(struct inode *dir, struct dentry *entry)
630 {
631 	int err;
632 	struct fuse_conn *fc = get_fuse_conn(dir);
633 	struct fuse_req *req = fuse_get_req(fc);
634 	if (IS_ERR(req))
635 		return PTR_ERR(req);
636 
637 	req->in.h.opcode = FUSE_UNLINK;
638 	req->in.h.nodeid = get_node_id(dir);
639 	req->in.numargs = 1;
640 	req->in.args[0].size = entry->d_name.len + 1;
641 	req->in.args[0].value = entry->d_name.name;
642 	fuse_request_send(fc, req);
643 	err = req->out.h.error;
644 	fuse_put_request(fc, req);
645 	if (!err) {
646 		struct inode *inode = entry->d_inode;
647 
648 		/*
649 		 * Set nlink to zero so the inode can be cleared, if the inode
650 		 * does have more links this will be discovered at the next
651 		 * lookup/getattr.
652 		 */
653 		clear_nlink(inode);
654 		fuse_invalidate_attr(inode);
655 		fuse_invalidate_attr(dir);
656 		fuse_invalidate_entry_cache(entry);
657 	} else if (err == -EINTR)
658 		fuse_invalidate_entry(entry);
659 	return err;
660 }
661 
662 static int fuse_rmdir(struct inode *dir, struct dentry *entry)
663 {
664 	int err;
665 	struct fuse_conn *fc = get_fuse_conn(dir);
666 	struct fuse_req *req = fuse_get_req(fc);
667 	if (IS_ERR(req))
668 		return PTR_ERR(req);
669 
670 	dentry_unhash(entry);
671 
672 	req->in.h.opcode = FUSE_RMDIR;
673 	req->in.h.nodeid = get_node_id(dir);
674 	req->in.numargs = 1;
675 	req->in.args[0].size = entry->d_name.len + 1;
676 	req->in.args[0].value = entry->d_name.name;
677 	fuse_request_send(fc, req);
678 	err = req->out.h.error;
679 	fuse_put_request(fc, req);
680 	if (!err) {
681 		clear_nlink(entry->d_inode);
682 		fuse_invalidate_attr(dir);
683 		fuse_invalidate_entry_cache(entry);
684 	} else if (err == -EINTR)
685 		fuse_invalidate_entry(entry);
686 	return err;
687 }
688 
689 static int fuse_rename(struct inode *olddir, struct dentry *oldent,
690 		       struct inode *newdir, struct dentry *newent)
691 {
692 	int err;
693 	struct fuse_rename_in inarg;
694 	struct fuse_conn *fc = get_fuse_conn(olddir);
695 	struct fuse_req *req = fuse_get_req(fc);
696 
697 	if (newent->d_inode && S_ISDIR(newent->d_inode->i_mode))
698 		dentry_unhash(newent);
699 
700 	if (IS_ERR(req))
701 		return PTR_ERR(req);
702 
703 	memset(&inarg, 0, sizeof(inarg));
704 	inarg.newdir = get_node_id(newdir);
705 	req->in.h.opcode = FUSE_RENAME;
706 	req->in.h.nodeid = get_node_id(olddir);
707 	req->in.numargs = 3;
708 	req->in.args[0].size = sizeof(inarg);
709 	req->in.args[0].value = &inarg;
710 	req->in.args[1].size = oldent->d_name.len + 1;
711 	req->in.args[1].value = oldent->d_name.name;
712 	req->in.args[2].size = newent->d_name.len + 1;
713 	req->in.args[2].value = newent->d_name.name;
714 	fuse_request_send(fc, req);
715 	err = req->out.h.error;
716 	fuse_put_request(fc, req);
717 	if (!err) {
718 		/* ctime changes */
719 		fuse_invalidate_attr(oldent->d_inode);
720 
721 		fuse_invalidate_attr(olddir);
722 		if (olddir != newdir)
723 			fuse_invalidate_attr(newdir);
724 
725 		/* newent will end up negative */
726 		if (newent->d_inode) {
727 			fuse_invalidate_attr(newent->d_inode);
728 			fuse_invalidate_entry_cache(newent);
729 		}
730 	} else if (err == -EINTR) {
731 		/* If request was interrupted, DEITY only knows if the
732 		   rename actually took place.  If the invalidation
733 		   fails (e.g. some process has CWD under the renamed
734 		   directory), then there can be inconsistency between
735 		   the dcache and the real filesystem.  Tough luck. */
736 		fuse_invalidate_entry(oldent);
737 		if (newent->d_inode)
738 			fuse_invalidate_entry(newent);
739 	}
740 
741 	return err;
742 }
743 
744 static int fuse_link(struct dentry *entry, struct inode *newdir,
745 		     struct dentry *newent)
746 {
747 	int err;
748 	struct fuse_link_in inarg;
749 	struct inode *inode = entry->d_inode;
750 	struct fuse_conn *fc = get_fuse_conn(inode);
751 	struct fuse_req *req = fuse_get_req(fc);
752 	if (IS_ERR(req))
753 		return PTR_ERR(req);
754 
755 	memset(&inarg, 0, sizeof(inarg));
756 	inarg.oldnodeid = get_node_id(inode);
757 	req->in.h.opcode = FUSE_LINK;
758 	req->in.numargs = 2;
759 	req->in.args[0].size = sizeof(inarg);
760 	req->in.args[0].value = &inarg;
761 	req->in.args[1].size = newent->d_name.len + 1;
762 	req->in.args[1].value = newent->d_name.name;
763 	err = create_new_entry(fc, req, newdir, newent, inode->i_mode);
764 	/* Contrary to "normal" filesystems it can happen that link
765 	   makes two "logical" inodes point to the same "physical"
766 	   inode.  We invalidate the attributes of the old one, so it
767 	   will reflect changes in the backing inode (link count,
768 	   etc.)
769 	*/
770 	if (!err || err == -EINTR)
771 		fuse_invalidate_attr(inode);
772 	return err;
773 }
774 
775 static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
776 			  struct kstat *stat)
777 {
778 	stat->dev = inode->i_sb->s_dev;
779 	stat->ino = attr->ino;
780 	stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
781 	stat->nlink = attr->nlink;
782 	stat->uid = attr->uid;
783 	stat->gid = attr->gid;
784 	stat->rdev = inode->i_rdev;
785 	stat->atime.tv_sec = attr->atime;
786 	stat->atime.tv_nsec = attr->atimensec;
787 	stat->mtime.tv_sec = attr->mtime;
788 	stat->mtime.tv_nsec = attr->mtimensec;
789 	stat->ctime.tv_sec = attr->ctime;
790 	stat->ctime.tv_nsec = attr->ctimensec;
791 	stat->size = attr->size;
792 	stat->blocks = attr->blocks;
793 	stat->blksize = (1 << inode->i_blkbits);
794 }
795 
796 static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
797 			   struct file *file)
798 {
799 	int err;
800 	struct fuse_getattr_in inarg;
801 	struct fuse_attr_out outarg;
802 	struct fuse_conn *fc = get_fuse_conn(inode);
803 	struct fuse_req *req;
804 	u64 attr_version;
805 
806 	req = fuse_get_req(fc);
807 	if (IS_ERR(req))
808 		return PTR_ERR(req);
809 
810 	attr_version = fuse_get_attr_version(fc);
811 
812 	memset(&inarg, 0, sizeof(inarg));
813 	memset(&outarg, 0, sizeof(outarg));
814 	/* Directories have separate file-handle space */
815 	if (file && S_ISREG(inode->i_mode)) {
816 		struct fuse_file *ff = file->private_data;
817 
818 		inarg.getattr_flags |= FUSE_GETATTR_FH;
819 		inarg.fh = ff->fh;
820 	}
821 	req->in.h.opcode = FUSE_GETATTR;
822 	req->in.h.nodeid = get_node_id(inode);
823 	req->in.numargs = 1;
824 	req->in.args[0].size = sizeof(inarg);
825 	req->in.args[0].value = &inarg;
826 	req->out.numargs = 1;
827 	if (fc->minor < 9)
828 		req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
829 	else
830 		req->out.args[0].size = sizeof(outarg);
831 	req->out.args[0].value = &outarg;
832 	fuse_request_send(fc, req);
833 	err = req->out.h.error;
834 	fuse_put_request(fc, req);
835 	if (!err) {
836 		if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
837 			make_bad_inode(inode);
838 			err = -EIO;
839 		} else {
840 			fuse_change_attributes(inode, &outarg.attr,
841 					       attr_timeout(&outarg),
842 					       attr_version);
843 			if (stat)
844 				fuse_fillattr(inode, &outarg.attr, stat);
845 		}
846 	}
847 	return err;
848 }
849 
850 int fuse_update_attributes(struct inode *inode, struct kstat *stat,
851 			   struct file *file, bool *refreshed)
852 {
853 	struct fuse_inode *fi = get_fuse_inode(inode);
854 	int err;
855 	bool r;
856 
857 	if (fi->i_time < get_jiffies_64()) {
858 		r = true;
859 		err = fuse_do_getattr(inode, stat, file);
860 	} else {
861 		r = false;
862 		err = 0;
863 		if (stat) {
864 			generic_fillattr(inode, stat);
865 			stat->mode = fi->orig_i_mode;
866 		}
867 	}
868 
869 	if (refreshed != NULL)
870 		*refreshed = r;
871 
872 	return err;
873 }
874 
875 int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
876 			     struct qstr *name)
877 {
878 	int err = -ENOTDIR;
879 	struct inode *parent;
880 	struct dentry *dir;
881 	struct dentry *entry;
882 
883 	parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid);
884 	if (!parent)
885 		return -ENOENT;
886 
887 	mutex_lock(&parent->i_mutex);
888 	if (!S_ISDIR(parent->i_mode))
889 		goto unlock;
890 
891 	err = -ENOENT;
892 	dir = d_find_alias(parent);
893 	if (!dir)
894 		goto unlock;
895 
896 	entry = d_lookup(dir, name);
897 	dput(dir);
898 	if (!entry)
899 		goto unlock;
900 
901 	fuse_invalidate_attr(parent);
902 	fuse_invalidate_entry(entry);
903 	dput(entry);
904 	err = 0;
905 
906  unlock:
907 	mutex_unlock(&parent->i_mutex);
908 	iput(parent);
909 	return err;
910 }
911 
912 /*
913  * Calling into a user-controlled filesystem gives the filesystem
914  * daemon ptrace-like capabilities over the requester process.  This
915  * means, that the filesystem daemon is able to record the exact
916  * filesystem operations performed, and can also control the behavior
917  * of the requester process in otherwise impossible ways.  For example
918  * it can delay the operation for arbitrary length of time allowing
919  * DoS against the requester.
920  *
921  * For this reason only those processes can call into the filesystem,
922  * for which the owner of the mount has ptrace privilege.  This
923  * excludes processes started by other users, suid or sgid processes.
924  */
925 int fuse_allow_task(struct fuse_conn *fc, struct task_struct *task)
926 {
927 	const struct cred *cred;
928 	int ret;
929 
930 	if (fc->flags & FUSE_ALLOW_OTHER)
931 		return 1;
932 
933 	rcu_read_lock();
934 	ret = 0;
935 	cred = __task_cred(task);
936 	if (cred->euid == fc->user_id &&
937 	    cred->suid == fc->user_id &&
938 	    cred->uid  == fc->user_id &&
939 	    cred->egid == fc->group_id &&
940 	    cred->sgid == fc->group_id &&
941 	    cred->gid  == fc->group_id)
942 		ret = 1;
943 	rcu_read_unlock();
944 
945 	return ret;
946 }
947 
948 static int fuse_access(struct inode *inode, int mask)
949 {
950 	struct fuse_conn *fc = get_fuse_conn(inode);
951 	struct fuse_req *req;
952 	struct fuse_access_in inarg;
953 	int err;
954 
955 	if (fc->no_access)
956 		return 0;
957 
958 	req = fuse_get_req(fc);
959 	if (IS_ERR(req))
960 		return PTR_ERR(req);
961 
962 	memset(&inarg, 0, sizeof(inarg));
963 	inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
964 	req->in.h.opcode = FUSE_ACCESS;
965 	req->in.h.nodeid = get_node_id(inode);
966 	req->in.numargs = 1;
967 	req->in.args[0].size = sizeof(inarg);
968 	req->in.args[0].value = &inarg;
969 	fuse_request_send(fc, req);
970 	err = req->out.h.error;
971 	fuse_put_request(fc, req);
972 	if (err == -ENOSYS) {
973 		fc->no_access = 1;
974 		err = 0;
975 	}
976 	return err;
977 }
978 
979 static int fuse_perm_getattr(struct inode *inode, int flags)
980 {
981 	if (flags & IPERM_FLAG_RCU)
982 		return -ECHILD;
983 
984 	return fuse_do_getattr(inode, NULL, NULL);
985 }
986 
987 /*
988  * Check permission.  The two basic access models of FUSE are:
989  *
990  * 1) Local access checking ('default_permissions' mount option) based
991  * on file mode.  This is the plain old disk filesystem permission
992  * modell.
993  *
994  * 2) "Remote" access checking, where server is responsible for
995  * checking permission in each inode operation.  An exception to this
996  * is if ->permission() was invoked from sys_access() in which case an
997  * access request is sent.  Execute permission is still checked
998  * locally based on file mode.
999  */
1000 static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
1001 {
1002 	struct fuse_conn *fc = get_fuse_conn(inode);
1003 	bool refreshed = false;
1004 	int err = 0;
1005 
1006 	if (!fuse_allow_task(fc, current))
1007 		return -EACCES;
1008 
1009 	/*
1010 	 * If attributes are needed, refresh them before proceeding
1011 	 */
1012 	if ((fc->flags & FUSE_DEFAULT_PERMISSIONS) ||
1013 	    ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1014 		struct fuse_inode *fi = get_fuse_inode(inode);
1015 
1016 		if (fi->i_time < get_jiffies_64()) {
1017 			refreshed = true;
1018 
1019 			err = fuse_perm_getattr(inode, flags);
1020 			if (err)
1021 				return err;
1022 		}
1023 	}
1024 
1025 	if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
1026 		err = generic_permission(inode, mask, flags, NULL);
1027 
1028 		/* If permission is denied, try to refresh file
1029 		   attributes.  This is also needed, because the root
1030 		   node will at first have no permissions */
1031 		if (err == -EACCES && !refreshed) {
1032 			err = fuse_perm_getattr(inode, flags);
1033 			if (!err)
1034 				err = generic_permission(inode, mask,
1035 							flags, NULL);
1036 		}
1037 
1038 		/* Note: the opposite of the above test does not
1039 		   exist.  So if permissions are revoked this won't be
1040 		   noticed immediately, only after the attribute
1041 		   timeout has expired */
1042 	} else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
1043 		if (flags & IPERM_FLAG_RCU)
1044 			return -ECHILD;
1045 
1046 		err = fuse_access(inode, mask);
1047 	} else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
1048 		if (!(inode->i_mode & S_IXUGO)) {
1049 			if (refreshed)
1050 				return -EACCES;
1051 
1052 			err = fuse_perm_getattr(inode, flags);
1053 			if (!err && !(inode->i_mode & S_IXUGO))
1054 				return -EACCES;
1055 		}
1056 	}
1057 	return err;
1058 }
1059 
1060 static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
1061 			 void *dstbuf, filldir_t filldir)
1062 {
1063 	while (nbytes >= FUSE_NAME_OFFSET) {
1064 		struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
1065 		size_t reclen = FUSE_DIRENT_SIZE(dirent);
1066 		int over;
1067 		if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
1068 			return -EIO;
1069 		if (reclen > nbytes)
1070 			break;
1071 
1072 		over = filldir(dstbuf, dirent->name, dirent->namelen,
1073 			       file->f_pos, dirent->ino, dirent->type);
1074 		if (over)
1075 			break;
1076 
1077 		buf += reclen;
1078 		nbytes -= reclen;
1079 		file->f_pos = dirent->off;
1080 	}
1081 
1082 	return 0;
1083 }
1084 
1085 static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
1086 {
1087 	int err;
1088 	size_t nbytes;
1089 	struct page *page;
1090 	struct inode *inode = file->f_path.dentry->d_inode;
1091 	struct fuse_conn *fc = get_fuse_conn(inode);
1092 	struct fuse_req *req;
1093 
1094 	if (is_bad_inode(inode))
1095 		return -EIO;
1096 
1097 	req = fuse_get_req(fc);
1098 	if (IS_ERR(req))
1099 		return PTR_ERR(req);
1100 
1101 	page = alloc_page(GFP_KERNEL);
1102 	if (!page) {
1103 		fuse_put_request(fc, req);
1104 		return -ENOMEM;
1105 	}
1106 	req->out.argpages = 1;
1107 	req->num_pages = 1;
1108 	req->pages[0] = page;
1109 	fuse_read_fill(req, file, file->f_pos, PAGE_SIZE, FUSE_READDIR);
1110 	fuse_request_send(fc, req);
1111 	nbytes = req->out.args[0].size;
1112 	err = req->out.h.error;
1113 	fuse_put_request(fc, req);
1114 	if (!err)
1115 		err = parse_dirfile(page_address(page), nbytes, file, dstbuf,
1116 				    filldir);
1117 
1118 	__free_page(page);
1119 	fuse_invalidate_attr(inode); /* atime changed */
1120 	return err;
1121 }
1122 
1123 static char *read_link(struct dentry *dentry)
1124 {
1125 	struct inode *inode = dentry->d_inode;
1126 	struct fuse_conn *fc = get_fuse_conn(inode);
1127 	struct fuse_req *req = fuse_get_req(fc);
1128 	char *link;
1129 
1130 	if (IS_ERR(req))
1131 		return ERR_CAST(req);
1132 
1133 	link = (char *) __get_free_page(GFP_KERNEL);
1134 	if (!link) {
1135 		link = ERR_PTR(-ENOMEM);
1136 		goto out;
1137 	}
1138 	req->in.h.opcode = FUSE_READLINK;
1139 	req->in.h.nodeid = get_node_id(inode);
1140 	req->out.argvar = 1;
1141 	req->out.numargs = 1;
1142 	req->out.args[0].size = PAGE_SIZE - 1;
1143 	req->out.args[0].value = link;
1144 	fuse_request_send(fc, req);
1145 	if (req->out.h.error) {
1146 		free_page((unsigned long) link);
1147 		link = ERR_PTR(req->out.h.error);
1148 	} else
1149 		link[req->out.args[0].size] = '\0';
1150  out:
1151 	fuse_put_request(fc, req);
1152 	fuse_invalidate_attr(inode); /* atime changed */
1153 	return link;
1154 }
1155 
1156 static void free_link(char *link)
1157 {
1158 	if (!IS_ERR(link))
1159 		free_page((unsigned long) link);
1160 }
1161 
1162 static void *fuse_follow_link(struct dentry *dentry, struct nameidata *nd)
1163 {
1164 	nd_set_link(nd, read_link(dentry));
1165 	return NULL;
1166 }
1167 
1168 static void fuse_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
1169 {
1170 	free_link(nd_get_link(nd));
1171 }
1172 
1173 static int fuse_dir_open(struct inode *inode, struct file *file)
1174 {
1175 	return fuse_open_common(inode, file, true);
1176 }
1177 
1178 static int fuse_dir_release(struct inode *inode, struct file *file)
1179 {
1180 	fuse_release_common(file, FUSE_RELEASEDIR);
1181 
1182 	return 0;
1183 }
1184 
1185 static int fuse_dir_fsync(struct file *file, int datasync)
1186 {
1187 	return fuse_fsync_common(file, datasync, 1);
1188 }
1189 
1190 static bool update_mtime(unsigned ivalid)
1191 {
1192 	/* Always update if mtime is explicitly set  */
1193 	if (ivalid & ATTR_MTIME_SET)
1194 		return true;
1195 
1196 	/* If it's an open(O_TRUNC) or an ftruncate(), don't update */
1197 	if ((ivalid & ATTR_SIZE) && (ivalid & (ATTR_OPEN | ATTR_FILE)))
1198 		return false;
1199 
1200 	/* In all other cases update */
1201 	return true;
1202 }
1203 
1204 static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
1205 {
1206 	unsigned ivalid = iattr->ia_valid;
1207 
1208 	if (ivalid & ATTR_MODE)
1209 		arg->valid |= FATTR_MODE,   arg->mode = iattr->ia_mode;
1210 	if (ivalid & ATTR_UID)
1211 		arg->valid |= FATTR_UID,    arg->uid = iattr->ia_uid;
1212 	if (ivalid & ATTR_GID)
1213 		arg->valid |= FATTR_GID,    arg->gid = iattr->ia_gid;
1214 	if (ivalid & ATTR_SIZE)
1215 		arg->valid |= FATTR_SIZE,   arg->size = iattr->ia_size;
1216 	if (ivalid & ATTR_ATIME) {
1217 		arg->valid |= FATTR_ATIME;
1218 		arg->atime = iattr->ia_atime.tv_sec;
1219 		arg->atimensec = iattr->ia_atime.tv_nsec;
1220 		if (!(ivalid & ATTR_ATIME_SET))
1221 			arg->valid |= FATTR_ATIME_NOW;
1222 	}
1223 	if ((ivalid & ATTR_MTIME) && update_mtime(ivalid)) {
1224 		arg->valid |= FATTR_MTIME;
1225 		arg->mtime = iattr->ia_mtime.tv_sec;
1226 		arg->mtimensec = iattr->ia_mtime.tv_nsec;
1227 		if (!(ivalid & ATTR_MTIME_SET))
1228 			arg->valid |= FATTR_MTIME_NOW;
1229 	}
1230 }
1231 
1232 /*
1233  * Prevent concurrent writepages on inode
1234  *
1235  * This is done by adding a negative bias to the inode write counter
1236  * and waiting for all pending writes to finish.
1237  */
1238 void fuse_set_nowrite(struct inode *inode)
1239 {
1240 	struct fuse_conn *fc = get_fuse_conn(inode);
1241 	struct fuse_inode *fi = get_fuse_inode(inode);
1242 
1243 	BUG_ON(!mutex_is_locked(&inode->i_mutex));
1244 
1245 	spin_lock(&fc->lock);
1246 	BUG_ON(fi->writectr < 0);
1247 	fi->writectr += FUSE_NOWRITE;
1248 	spin_unlock(&fc->lock);
1249 	wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
1250 }
1251 
1252 /*
1253  * Allow writepages on inode
1254  *
1255  * Remove the bias from the writecounter and send any queued
1256  * writepages.
1257  */
1258 static void __fuse_release_nowrite(struct inode *inode)
1259 {
1260 	struct fuse_inode *fi = get_fuse_inode(inode);
1261 
1262 	BUG_ON(fi->writectr != FUSE_NOWRITE);
1263 	fi->writectr = 0;
1264 	fuse_flush_writepages(inode);
1265 }
1266 
1267 void fuse_release_nowrite(struct inode *inode)
1268 {
1269 	struct fuse_conn *fc = get_fuse_conn(inode);
1270 
1271 	spin_lock(&fc->lock);
1272 	__fuse_release_nowrite(inode);
1273 	spin_unlock(&fc->lock);
1274 }
1275 
1276 /*
1277  * Set attributes, and at the same time refresh them.
1278  *
1279  * Truncation is slightly complicated, because the 'truncate' request
1280  * may fail, in which case we don't want to touch the mapping.
1281  * vmtruncate() doesn't allow for this case, so do the rlimit checking
1282  * and the actual truncation by hand.
1283  */
1284 static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
1285 			   struct file *file)
1286 {
1287 	struct inode *inode = entry->d_inode;
1288 	struct fuse_conn *fc = get_fuse_conn(inode);
1289 	struct fuse_req *req;
1290 	struct fuse_setattr_in inarg;
1291 	struct fuse_attr_out outarg;
1292 	bool is_truncate = false;
1293 	loff_t oldsize;
1294 	int err;
1295 
1296 	if (!fuse_allow_task(fc, current))
1297 		return -EACCES;
1298 
1299 	if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
1300 		attr->ia_valid |= ATTR_FORCE;
1301 
1302 	err = inode_change_ok(inode, attr);
1303 	if (err)
1304 		return err;
1305 
1306 	if (attr->ia_valid & ATTR_OPEN) {
1307 		if (fc->atomic_o_trunc)
1308 			return 0;
1309 		file = NULL;
1310 	}
1311 
1312 	if (attr->ia_valid & ATTR_SIZE)
1313 		is_truncate = true;
1314 
1315 	req = fuse_get_req(fc);
1316 	if (IS_ERR(req))
1317 		return PTR_ERR(req);
1318 
1319 	if (is_truncate)
1320 		fuse_set_nowrite(inode);
1321 
1322 	memset(&inarg, 0, sizeof(inarg));
1323 	memset(&outarg, 0, sizeof(outarg));
1324 	iattr_to_fattr(attr, &inarg);
1325 	if (file) {
1326 		struct fuse_file *ff = file->private_data;
1327 		inarg.valid |= FATTR_FH;
1328 		inarg.fh = ff->fh;
1329 	}
1330 	if (attr->ia_valid & ATTR_SIZE) {
1331 		/* For mandatory locking in truncate */
1332 		inarg.valid |= FATTR_LOCKOWNER;
1333 		inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
1334 	}
1335 	req->in.h.opcode = FUSE_SETATTR;
1336 	req->in.h.nodeid = get_node_id(inode);
1337 	req->in.numargs = 1;
1338 	req->in.args[0].size = sizeof(inarg);
1339 	req->in.args[0].value = &inarg;
1340 	req->out.numargs = 1;
1341 	if (fc->minor < 9)
1342 		req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
1343 	else
1344 		req->out.args[0].size = sizeof(outarg);
1345 	req->out.args[0].value = &outarg;
1346 	fuse_request_send(fc, req);
1347 	err = req->out.h.error;
1348 	fuse_put_request(fc, req);
1349 	if (err) {
1350 		if (err == -EINTR)
1351 			fuse_invalidate_attr(inode);
1352 		goto error;
1353 	}
1354 
1355 	if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
1356 		make_bad_inode(inode);
1357 		err = -EIO;
1358 		goto error;
1359 	}
1360 
1361 	spin_lock(&fc->lock);
1362 	fuse_change_attributes_common(inode, &outarg.attr,
1363 				      attr_timeout(&outarg));
1364 	oldsize = inode->i_size;
1365 	i_size_write(inode, outarg.attr.size);
1366 
1367 	if (is_truncate) {
1368 		/* NOTE: this may release/reacquire fc->lock */
1369 		__fuse_release_nowrite(inode);
1370 	}
1371 	spin_unlock(&fc->lock);
1372 
1373 	/*
1374 	 * Only call invalidate_inode_pages2() after removing
1375 	 * FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock.
1376 	 */
1377 	if (S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
1378 		truncate_pagecache(inode, oldsize, outarg.attr.size);
1379 		invalidate_inode_pages2(inode->i_mapping);
1380 	}
1381 
1382 	return 0;
1383 
1384 error:
1385 	if (is_truncate)
1386 		fuse_release_nowrite(inode);
1387 
1388 	return err;
1389 }
1390 
1391 static int fuse_setattr(struct dentry *entry, struct iattr *attr)
1392 {
1393 	if (attr->ia_valid & ATTR_FILE)
1394 		return fuse_do_setattr(entry, attr, attr->ia_file);
1395 	else
1396 		return fuse_do_setattr(entry, attr, NULL);
1397 }
1398 
1399 static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
1400 			struct kstat *stat)
1401 {
1402 	struct inode *inode = entry->d_inode;
1403 	struct fuse_conn *fc = get_fuse_conn(inode);
1404 
1405 	if (!fuse_allow_task(fc, current))
1406 		return -EACCES;
1407 
1408 	return fuse_update_attributes(inode, stat, NULL, NULL);
1409 }
1410 
1411 static int fuse_setxattr(struct dentry *entry, const char *name,
1412 			 const void *value, size_t size, int flags)
1413 {
1414 	struct inode *inode = entry->d_inode;
1415 	struct fuse_conn *fc = get_fuse_conn(inode);
1416 	struct fuse_req *req;
1417 	struct fuse_setxattr_in inarg;
1418 	int err;
1419 
1420 	if (fc->no_setxattr)
1421 		return -EOPNOTSUPP;
1422 
1423 	req = fuse_get_req(fc);
1424 	if (IS_ERR(req))
1425 		return PTR_ERR(req);
1426 
1427 	memset(&inarg, 0, sizeof(inarg));
1428 	inarg.size = size;
1429 	inarg.flags = flags;
1430 	req->in.h.opcode = FUSE_SETXATTR;
1431 	req->in.h.nodeid = get_node_id(inode);
1432 	req->in.numargs = 3;
1433 	req->in.args[0].size = sizeof(inarg);
1434 	req->in.args[0].value = &inarg;
1435 	req->in.args[1].size = strlen(name) + 1;
1436 	req->in.args[1].value = name;
1437 	req->in.args[2].size = size;
1438 	req->in.args[2].value = value;
1439 	fuse_request_send(fc, req);
1440 	err = req->out.h.error;
1441 	fuse_put_request(fc, req);
1442 	if (err == -ENOSYS) {
1443 		fc->no_setxattr = 1;
1444 		err = -EOPNOTSUPP;
1445 	}
1446 	return err;
1447 }
1448 
1449 static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
1450 			     void *value, size_t size)
1451 {
1452 	struct inode *inode = entry->d_inode;
1453 	struct fuse_conn *fc = get_fuse_conn(inode);
1454 	struct fuse_req *req;
1455 	struct fuse_getxattr_in inarg;
1456 	struct fuse_getxattr_out outarg;
1457 	ssize_t ret;
1458 
1459 	if (fc->no_getxattr)
1460 		return -EOPNOTSUPP;
1461 
1462 	req = fuse_get_req(fc);
1463 	if (IS_ERR(req))
1464 		return PTR_ERR(req);
1465 
1466 	memset(&inarg, 0, sizeof(inarg));
1467 	inarg.size = size;
1468 	req->in.h.opcode = FUSE_GETXATTR;
1469 	req->in.h.nodeid = get_node_id(inode);
1470 	req->in.numargs = 2;
1471 	req->in.args[0].size = sizeof(inarg);
1472 	req->in.args[0].value = &inarg;
1473 	req->in.args[1].size = strlen(name) + 1;
1474 	req->in.args[1].value = name;
1475 	/* This is really two different operations rolled into one */
1476 	req->out.numargs = 1;
1477 	if (size) {
1478 		req->out.argvar = 1;
1479 		req->out.args[0].size = size;
1480 		req->out.args[0].value = value;
1481 	} else {
1482 		req->out.args[0].size = sizeof(outarg);
1483 		req->out.args[0].value = &outarg;
1484 	}
1485 	fuse_request_send(fc, req);
1486 	ret = req->out.h.error;
1487 	if (!ret)
1488 		ret = size ? req->out.args[0].size : outarg.size;
1489 	else {
1490 		if (ret == -ENOSYS) {
1491 			fc->no_getxattr = 1;
1492 			ret = -EOPNOTSUPP;
1493 		}
1494 	}
1495 	fuse_put_request(fc, req);
1496 	return ret;
1497 }
1498 
1499 static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
1500 {
1501 	struct inode *inode = entry->d_inode;
1502 	struct fuse_conn *fc = get_fuse_conn(inode);
1503 	struct fuse_req *req;
1504 	struct fuse_getxattr_in inarg;
1505 	struct fuse_getxattr_out outarg;
1506 	ssize_t ret;
1507 
1508 	if (!fuse_allow_task(fc, current))
1509 		return -EACCES;
1510 
1511 	if (fc->no_listxattr)
1512 		return -EOPNOTSUPP;
1513 
1514 	req = fuse_get_req(fc);
1515 	if (IS_ERR(req))
1516 		return PTR_ERR(req);
1517 
1518 	memset(&inarg, 0, sizeof(inarg));
1519 	inarg.size = size;
1520 	req->in.h.opcode = FUSE_LISTXATTR;
1521 	req->in.h.nodeid = get_node_id(inode);
1522 	req->in.numargs = 1;
1523 	req->in.args[0].size = sizeof(inarg);
1524 	req->in.args[0].value = &inarg;
1525 	/* This is really two different operations rolled into one */
1526 	req->out.numargs = 1;
1527 	if (size) {
1528 		req->out.argvar = 1;
1529 		req->out.args[0].size = size;
1530 		req->out.args[0].value = list;
1531 	} else {
1532 		req->out.args[0].size = sizeof(outarg);
1533 		req->out.args[0].value = &outarg;
1534 	}
1535 	fuse_request_send(fc, req);
1536 	ret = req->out.h.error;
1537 	if (!ret)
1538 		ret = size ? req->out.args[0].size : outarg.size;
1539 	else {
1540 		if (ret == -ENOSYS) {
1541 			fc->no_listxattr = 1;
1542 			ret = -EOPNOTSUPP;
1543 		}
1544 	}
1545 	fuse_put_request(fc, req);
1546 	return ret;
1547 }
1548 
1549 static int fuse_removexattr(struct dentry *entry, const char *name)
1550 {
1551 	struct inode *inode = entry->d_inode;
1552 	struct fuse_conn *fc = get_fuse_conn(inode);
1553 	struct fuse_req *req;
1554 	int err;
1555 
1556 	if (fc->no_removexattr)
1557 		return -EOPNOTSUPP;
1558 
1559 	req = fuse_get_req(fc);
1560 	if (IS_ERR(req))
1561 		return PTR_ERR(req);
1562 
1563 	req->in.h.opcode = FUSE_REMOVEXATTR;
1564 	req->in.h.nodeid = get_node_id(inode);
1565 	req->in.numargs = 1;
1566 	req->in.args[0].size = strlen(name) + 1;
1567 	req->in.args[0].value = name;
1568 	fuse_request_send(fc, req);
1569 	err = req->out.h.error;
1570 	fuse_put_request(fc, req);
1571 	if (err == -ENOSYS) {
1572 		fc->no_removexattr = 1;
1573 		err = -EOPNOTSUPP;
1574 	}
1575 	return err;
1576 }
1577 
1578 static const struct inode_operations fuse_dir_inode_operations = {
1579 	.lookup		= fuse_lookup,
1580 	.mkdir		= fuse_mkdir,
1581 	.symlink	= fuse_symlink,
1582 	.unlink		= fuse_unlink,
1583 	.rmdir		= fuse_rmdir,
1584 	.rename		= fuse_rename,
1585 	.link		= fuse_link,
1586 	.setattr	= fuse_setattr,
1587 	.create		= fuse_create,
1588 	.mknod		= fuse_mknod,
1589 	.permission	= fuse_permission,
1590 	.getattr	= fuse_getattr,
1591 	.setxattr	= fuse_setxattr,
1592 	.getxattr	= fuse_getxattr,
1593 	.listxattr	= fuse_listxattr,
1594 	.removexattr	= fuse_removexattr,
1595 };
1596 
1597 static const struct file_operations fuse_dir_operations = {
1598 	.llseek		= generic_file_llseek,
1599 	.read		= generic_read_dir,
1600 	.readdir	= fuse_readdir,
1601 	.open		= fuse_dir_open,
1602 	.release	= fuse_dir_release,
1603 	.fsync		= fuse_dir_fsync,
1604 };
1605 
1606 static const struct inode_operations fuse_common_inode_operations = {
1607 	.setattr	= fuse_setattr,
1608 	.permission	= fuse_permission,
1609 	.getattr	= fuse_getattr,
1610 	.setxattr	= fuse_setxattr,
1611 	.getxattr	= fuse_getxattr,
1612 	.listxattr	= fuse_listxattr,
1613 	.removexattr	= fuse_removexattr,
1614 };
1615 
1616 static const struct inode_operations fuse_symlink_inode_operations = {
1617 	.setattr	= fuse_setattr,
1618 	.follow_link	= fuse_follow_link,
1619 	.put_link	= fuse_put_link,
1620 	.readlink	= generic_readlink,
1621 	.getattr	= fuse_getattr,
1622 	.setxattr	= fuse_setxattr,
1623 	.getxattr	= fuse_getxattr,
1624 	.listxattr	= fuse_listxattr,
1625 	.removexattr	= fuse_removexattr,
1626 };
1627 
1628 void fuse_init_common(struct inode *inode)
1629 {
1630 	inode->i_op = &fuse_common_inode_operations;
1631 }
1632 
1633 void fuse_init_dir(struct inode *inode)
1634 {
1635 	inode->i_op = &fuse_dir_inode_operations;
1636 	inode->i_fop = &fuse_dir_operations;
1637 }
1638 
1639 void fuse_init_symlink(struct inode *inode)
1640 {
1641 	inode->i_op = &fuse_symlink_inode_operations;
1642 }
1643